/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done:	Signal completion to the issuer of the task
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	bool			booted_once;
	struct hlist_node	*node;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lock_class_key cpuhp_state_key;
static struct lockdep_map cpuhp_state_lock_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
#endif

/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			skip_onerr;
	bool			cant_stop;
	bool			multi_instance;
};

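/*
 * Illustrative sketch (not part of this file's state table): subsystems
 * normally fill in startup.single/teardown.single indirectly through
 * cpuhp_setup_state(), declared in <linux/cpuhotplug.h>. The callback names
 * below are made up purely for illustration:
 *
 *	static int foo_online(unsigned int cpu)  { ... return 0; }
 *	static int foo_offline(unsigned int cpu) { ... return 0; }
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/foo:online",
 *				foo_online, foo_offline);
 */
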
static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];

static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The step in the state machine
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For a multi-instance state, the instance to invoke the
 *		callback for; NULL means all registered instances
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (!step->multi_instance) {
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret)
			goto err;
		cnt++;
	}
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;
		cbm(cpu, node);
	}
	return ret;
}

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()	  lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()	  lock_map_release(&cpu_hotplug.dep_map)


void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();

}
EXPORT_SYMBOL_GPL(put_online_cpus);

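/*
 * Typical read-side usage sketch (illustrative only, do_something() is a
 * hypothetical helper): hold the online mask stable while walking the
 * currently online CPUs.
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_something(cpu);
 *	put_online_cpus();
 */
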
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 *
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif	/* CONFIG_HOTPLUG_CPU */

/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
EXPORT_SYMBOL_GPL(cpu_smt_control);

static bool cpu_smt_available __read_mostly;

void __init cpu_smt_disable(bool force)
{
	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return;

	if (force) {
		pr_info("SMT: Force disabled\n");
		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
	} else {
		pr_info("SMT: disabled\n");
		cpu_smt_control = CPU_SMT_DISABLED;
	}
}

/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code before non-boot CPUs
 * are brought up.
 */
void __init cpu_smt_check_topology_early(void)
{
	if (!topology_smt_supported())
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

/*
 * If SMT was disabled by BIOS, detect it here, after the CPUs have been
 * brought online. This ensures the smt/l1tf sysfs entries are consistent
 * with reality. cpu_smt_available is set to true during the bringup of
 * non-boot CPUs when an SMT sibling is detected. Note, this may overwrite
 * cpu_smt_control's previous setting.
 */
void __init cpu_smt_check_topology(void)
{
	if (!cpu_smt_available)
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

static int __init smt_cmdline_disable(char *str)
{
	cpu_smt_disable(str && !strcmp(str, "force"));
	return 0;
}
early_param("nosmt", smt_cmdline_disable);

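/*
 * Boot command line sketch for the early_param() above:
 *
 *	nosmt		- disable SMT (can later be re-enabled via the sysfs
 *			  SMT control, as cpu_smt_control stays DISABLED)
 *	nosmt=force	- disable SMT and forbid re-enabling it
 */
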
static inline bool cpu_smt_allowed(unsigned int cpu)
{
	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * If the CPU is not a 'primary' thread and the booted_once bit is
	 * set then the processor has SMT support. Store this information
	 * for the late check of SMT support in cpu_smt_check_topology().
	 */
	if (per_cpu(cpuhp_state, cpu).booted_once)
		cpu_smt_available = true;

	if (cpu_smt_control == CPU_SMT_ENABLED)
		return true;

	/*
	 * On x86 it's required to boot all logical CPUs at least once so
	 * that the init code can get a chance to set CR4.MCE on each
	 * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
	 * core will shut down the machine.
	 */
	return !per_cpu(cpuhp_state, cpu).booted_once;
}
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

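/*
 * Illustrative registration sketch (hypothetical callback, not part of this
 * file): legacy users hook the CPU_* events through a struct notifier_block.
 *
 *	static int foo_cpu_callback(struct notifier_block *nb,
 *				    unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *		// react to action (CPU_ONLINE, CPU_DOWN_PREPARE, ...)
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_cpu_notifier = {
 *		.notifier_call = foo_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&foo_cpu_notifier);
 */
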
Thomas Gleixner090e77c2016-02-26 18:43:23 +0000461static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
Akinobu Mitae9fb7632010-05-26 14:43:28 -0700462 int *nr_calls)
463{
Thomas Gleixner090e77c2016-02-26 18:43:23 +0000464 unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
465 void *hcpu = (void *)(long)cpu;
466
Akinobu Mitae6bde732010-05-26 14:43:29 -0700467 int ret;
468
Thomas Gleixner090e77c2016-02-26 18:43:23 +0000469 ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
Akinobu Mitae9fb7632010-05-26 14:43:28 -0700470 nr_calls);
Akinobu Mitae6bde732010-05-26 14:43:29 -0700471
472 return notifier_to_errno(ret);
Akinobu Mitae9fb7632010-05-26 14:43:28 -0700473}
474
Thomas Gleixner090e77c2016-02-26 18:43:23 +0000475static int cpu_notify(unsigned long val, unsigned int cpu)
Akinobu Mitae9fb7632010-05-26 14:43:28 -0700476{
Thomas Gleixner090e77c2016-02-26 18:43:23 +0000477 return __cpu_notify(val, cpu, -1, NULL);
Akinobu Mitae9fb7632010-05-26 14:43:28 -0700478}
479
Sebastian Andrzej Siewior3b9d6da2016-04-08 14:40:15 +0200480static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
481{
482 BUG_ON(cpu_notify(val, cpu));
483}
484
Thomas Gleixnerba997462016-02-26 18:43:24 +0000485/* Notifier wrappers for transitioning to state machine */
486static int notify_prepare(unsigned int cpu)
487{
488 int nr_calls = 0;
489 int ret;
490
491 ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
492 if (ret) {
493 nr_calls--;
494 printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
495 __func__, cpu);
496 __cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
497 }
498 return ret;
499}
500
501static int notify_online(unsigned int cpu)
502{
503 cpu_notify(CPU_ONLINE, cpu);
504 return 0;
505}
506
Thomas Gleixner7b4e4b12017-07-04 22:20:23 +0200507static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);
508
Thomas Gleixner8df3e072016-02-26 18:43:41 +0000509static int bringup_wait_for_ap(unsigned int cpu)
510{
511 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
512
Thomas Gleixner7b4e4b12017-07-04 22:20:23 +0200513 /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
Thomas Gleixner8df3e072016-02-26 18:43:41 +0000514 wait_for_completion(&st->done);
Thomas Gleixner6b3d13f2017-07-11 22:06:24 +0200515 if (WARN_ON_ONCE((!cpu_online(cpu))))
516 return -ECANCELED;
Thomas Gleixner7b4e4b12017-07-04 22:20:23 +0200517
Peter Zijlstraa594a9e2019-12-10 09:34:54 +0100518 /* Unpark the hotplug thread of the target cpu */
Thomas Gleixner7b4e4b12017-07-04 22:20:23 +0200519 kthread_unpark(st->thread);
520
Thomas Gleixner8438e492018-06-29 16:05:48 +0200521 /*
522 * SMT soft disabling on X86 requires to bring the CPU out of the
523 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
524 * CPU marked itself as booted_once in cpu_notify_starting() so the
525 * cpu_smt_allowed() check will now return false if this is not the
526 * primary sibling.
527 */
528 if (!cpu_smt_allowed(cpu))
529 return -ECANCELED;
530
Thomas Gleixner7b4e4b12017-07-04 22:20:23 +0200531 /* Should we go further up ? */
532 if (st->target > CPUHP_AP_ONLINE_IDLE) {
533 __cpuhp_kick_ap_work(st);
534 wait_for_completion(&st->done);
535 }
Thomas Gleixner8df3e072016-02-26 18:43:41 +0000536 return st->result;
537}
538
Thomas Gleixnerba997462016-02-26 18:43:24 +0000539static int bringup_cpu(unsigned int cpu)
540{
541 struct task_struct *idle = idle_thread_get(cpu);
542 int ret;
543
Boris Ostrovskyaa877172016-08-03 13:22:28 -0400544 /*
545 * Some architectures have to walk the irq descriptors to
546 * setup the vector space for the cpu which comes online.
547 * Prevent irq alloc/free across the bringup.
548 */
549 irq_lock_sparse();
550
Thomas Gleixnerba997462016-02-26 18:43:24 +0000551 /* Arch-specific enabling code. */
552 ret = __cpu_up(cpu, idle);
Boris Ostrovskyaa877172016-08-03 13:22:28 -0400553 irq_unlock_sparse();
Thomas Gleixnerba997462016-02-26 18:43:24 +0000554 if (ret) {
555 cpu_notify(CPU_UP_CANCELED, cpu);
556 return ret;
557 }
Thomas Gleixner7b4e4b12017-07-04 22:20:23 +0200558 return bringup_wait_for_ap(cpu);
Thomas Gleixnerba997462016-02-26 18:43:24 +0000559}
560
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000561/*
562 * Hotplug state machine related functions
563 */
Thomas Gleixnera7246322016-08-12 19:49:38 +0200564static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000565{
566 for (st->state++; st->state < st->target; st->state++) {
Thomas Gleixnera7246322016-08-12 19:49:38 +0200567 struct cpuhp_step *step = cpuhp_get_step(st->state);
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000568
569 if (!step->skip_onerr)
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200570 cpuhp_invoke_callback(cpu, st->state, true, NULL);
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000571 }
572}
573
574static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
Thomas Gleixnera7246322016-08-12 19:49:38 +0200575 enum cpuhp_state target)
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000576{
577 enum cpuhp_state prev_state = st->state;
578 int ret = 0;
579
580 for (; st->state > target; st->state--) {
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200581 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000582 if (ret) {
583 st->target = prev_state;
Thomas Gleixnera7246322016-08-12 19:49:38 +0200584 undo_cpu_down(cpu, st);
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000585 break;
586 }
587 }
588 return ret;
589}
590
Thomas Gleixnera7246322016-08-12 19:49:38 +0200591static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000592{
593 for (st->state--; st->state > st->target; st->state--) {
Thomas Gleixnera7246322016-08-12 19:49:38 +0200594 struct cpuhp_step *step = cpuhp_get_step(st->state);
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000595
596 if (!step->skip_onerr)
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200597 cpuhp_invoke_callback(cpu, st->state, false, NULL);
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000598 }
599}
600
Thomas Gleixnerce4fbb92019-03-26 17:36:05 +0100601static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
602{
603 if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
604 return true;
605 /*
606 * When CPU hotplug is disabled, then taking the CPU down is not
607 * possible because takedown_cpu() and the architecture and
608 * subsystem specific mechanisms are not available. So the CPU
609 * which would be completely unplugged again needs to stay around
610 * in the current state.
611 */
612 return st->state <= CPUHP_BRINGUP_CPU;
613}
614
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000615static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
Thomas Gleixnera7246322016-08-12 19:49:38 +0200616 enum cpuhp_state target)
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000617{
618 enum cpuhp_state prev_state = st->state;
619 int ret = 0;
620
621 while (st->state < target) {
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000622 st->state++;
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200623 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000624 if (ret) {
Thomas Gleixnerce4fbb92019-03-26 17:36:05 +0100625 if (can_rollback_cpu(st)) {
626 st->target = prev_state;
627 undo_cpu_up(cpu, st);
628 }
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000629 break;
630 }
631 }
632 return ret;
633}
634
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000635/*
636 * The cpu hotplug threads manage the bringup and teardown of the cpus
637 */
638static void cpuhp_create(unsigned int cpu)
639{
640 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
641
642 init_completion(&st->done);
643}
644
645static int cpuhp_should_run(unsigned int cpu)
646{
647 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
648
649 return st->should_run;
650}
651
652/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
653static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
654{
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000655 enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000656
Thomas Gleixnera7246322016-08-12 19:49:38 +0200657 return cpuhp_down_callbacks(cpu, st, target);
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000658}
659
660/* Execute the online startup callbacks. Used to be CPU_ONLINE */
661static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
662{
Thomas Gleixnera7246322016-08-12 19:49:38 +0200663 return cpuhp_up_callbacks(cpu, st, st->target);
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000664}
665
666/*
667 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
668 * callbacks when a state gets [un]installed at runtime.
669 */
670static void cpuhp_thread_fun(unsigned int cpu)
671{
672 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
673 int ret = 0;
674
675 /*
676 * Paired with the mb() in cpuhp_kick_ap_work and
677 * cpuhp_invoke_ap_callback, so the work set is consistent visible.
678 */
679 smp_mb();
680 if (!st->should_run)
681 return;
682
683 st->should_run = false;
684
Thomas Gleixnerc198e222017-05-24 10:15:43 +0200685 lock_map_acquire(&cpuhp_state_lock_map);
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000686 /* Single callback invocation for [un]install ? */
Thomas Gleixnera7246322016-08-12 19:49:38 +0200687 if (st->single) {
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000688 if (st->cb_state < CPUHP_AP_ONLINE) {
689 local_irq_disable();
Thomas Gleixnera7246322016-08-12 19:49:38 +0200690 ret = cpuhp_invoke_callback(cpu, st->cb_state,
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200691 st->bringup, st->node);
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000692 local_irq_enable();
693 } else {
Thomas Gleixnera7246322016-08-12 19:49:38 +0200694 ret = cpuhp_invoke_callback(cpu, st->cb_state,
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200695 st->bringup, st->node);
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000696 }
Sebastian Andrzej Siewior3b9d6da2016-04-08 14:40:15 +0200697 } else if (st->rollback) {
698 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
699
Thomas Gleixnera7246322016-08-12 19:49:38 +0200700 undo_cpu_down(cpu, st);
Sebastian Andrzej Siewior3b9d6da2016-04-08 14:40:15 +0200701 /*
702 * This is a momentary workaround to keep the notifier users
703 * happy. Will go away once we got rid of the notifiers.
704 */
705 cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
706 st->rollback = false;
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000707 } else {
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000708 /* Cannot happen .... */
Thomas Gleixner8df3e072016-02-26 18:43:41 +0000709 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000710
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000711 /* Regular hotplug work */
712 if (st->state < st->target)
713 ret = cpuhp_ap_online(cpu, st);
714 else if (st->state > st->target)
715 ret = cpuhp_ap_offline(cpu, st);
716 }
Thomas Gleixnerc198e222017-05-24 10:15:43 +0200717 lock_map_release(&cpuhp_state_lock_map);
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000718 st->result = ret;
719 complete(&st->done);
720}
721
722/* Invoke a single callback on a remote cpu */
Thomas Gleixnera7246322016-08-12 19:49:38 +0200723static int
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200724cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
725 struct hlist_node *node)
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000726{
727 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
728
729 if (!cpu_online(cpu))
730 return 0;
731
Thomas Gleixnerc198e222017-05-24 10:15:43 +0200732 lock_map_acquire(&cpuhp_state_lock_map);
733 lock_map_release(&cpuhp_state_lock_map);
734
Thomas Gleixner6a4e2452016-07-13 17:16:03 +0000735 /*
736 * If we are up and running, use the hotplug thread. For early calls
737 * we invoke the thread function directly.
738 */
739 if (!st->thread)
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200740 return cpuhp_invoke_callback(cpu, state, bringup, node);
Thomas Gleixner6a4e2452016-07-13 17:16:03 +0000741
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000742 st->cb_state = state;
Thomas Gleixnera7246322016-08-12 19:49:38 +0200743 st->single = true;
744 st->bringup = bringup;
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200745 st->node = node;
Thomas Gleixnera7246322016-08-12 19:49:38 +0200746
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000747 /*
748 * Make sure the above stores are visible before should_run becomes
749 * true. Paired with the mb() above in cpuhp_thread_fun()
750 */
751 smp_mb();
752 st->should_run = true;
753 wake_up_process(st->thread);
754 wait_for_completion(&st->done);
755 return st->result;
756}
757
758/* Regular hotplug invocation of the AP hotplug thread */
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000759static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000760{
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000761 st->result = 0;
Thomas Gleixnera7246322016-08-12 19:49:38 +0200762 st->single = false;
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000763 /*
764 * Make sure the above stores are visible before should_run becomes
765 * true. Paired with the mb() above in cpuhp_thread_fun()
766 */
767 smp_mb();
768 st->should_run = true;
769 wake_up_process(st->thread);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000770}
771
772static int cpuhp_kick_ap_work(unsigned int cpu)
773{
774 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
775 enum cpuhp_state state = st->state;
776
777 trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
Thomas Gleixnerc198e222017-05-24 10:15:43 +0200778 lock_map_acquire(&cpuhp_state_lock_map);
779 lock_map_release(&cpuhp_state_lock_map);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000780 __cpuhp_kick_ap_work(st);
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000781 wait_for_completion(&st->done);
782 trace_cpuhp_exit(cpu, st->state, state, st->result);
783 return st->result;
784}
785
786static struct smp_hotplug_thread cpuhp_threads = {
787 .store = &cpuhp_state.thread,
788 .create = &cpuhp_create,
789 .thread_should_run = cpuhp_should_run,
790 .thread_fn = cpuhp_thread_fun,
791 .thread_comm = "cpuhp/%u",
792 .selfparking = true,
793};
794
795void __init cpuhp_threads_init(void)
796{
797 BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
798 kthread_unpark(this_cpu_read(cpuhp_state.thread));
799}
800
Linus Torvalds1da177e2005-04-16 15:20:36 -0700801EXPORT_SYMBOL(register_cpu_notifier);
Srivatsa S. Bhat93ae4f92014-03-11 02:04:14 +0530802EXPORT_SYMBOL(__register_cpu_notifier);
Mathias Krause71cf5ae2015-07-19 20:06:22 +0200803void unregister_cpu_notifier(struct notifier_block *nb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700804{
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100805 cpu_maps_update_begin();
Neil Brownbd5349c2006-10-17 00:10:35 -0700806 raw_notifier_chain_unregister(&cpu_chain, nb);
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100807 cpu_maps_update_done();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700808}
809EXPORT_SYMBOL(unregister_cpu_notifier);
810
Mathias Krause71cf5ae2015-07-19 20:06:22 +0200811void __unregister_cpu_notifier(struct notifier_block *nb)
Srivatsa S. Bhat93ae4f92014-03-11 02:04:14 +0530812{
813 raw_notifier_chain_unregister(&cpu_chain, nb);
814}
815EXPORT_SYMBOL(__unregister_cpu_notifier);
816
Michal Hocko56eaecc2016-12-07 14:54:38 +0100817#ifdef CONFIG_HOTPLUG_CPU
Anton Vorontsove4cc2f82012-05-31 16:26:26 -0700818/**
819 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
820 * @cpu: a CPU id
821 *
822 * This function walks all processes, finds a valid mm struct for each one and
823 * then clears a corresponding bit in mm's cpumask. While this all sounds
824 * trivial, there are various non-obvious corner cases, which this function
825 * tries to solve in a safe manner.
826 *
827 * Also note that the function uses a somewhat relaxed locking scheme, so it may
828 * be called only for an already offlined CPU.
829 */
Anton Vorontsovcb792952012-05-31 16:26:22 -0700830void clear_tasks_mm_cpumask(int cpu)
831{
832 struct task_struct *p;
833
834 /*
835 * This function is called after the cpu is taken down and marked
836 * offline, so its not like new tasks will ever get this cpu set in
837 * their mm mask. -- Peter Zijlstra
838 * Thus, we may use rcu_read_lock() here, instead of grabbing
839 * full-fledged tasklist_lock.
840 */
Anton Vorontsove4cc2f82012-05-31 16:26:26 -0700841 WARN_ON(cpu_online(cpu));
Anton Vorontsovcb792952012-05-31 16:26:22 -0700842 rcu_read_lock();
843 for_each_process(p) {
844 struct task_struct *t;
845
Anton Vorontsove4cc2f82012-05-31 16:26:26 -0700846 /*
847 * Main thread might exit, but other threads may still have
848 * a valid mm. Find one.
849 */
Anton Vorontsovcb792952012-05-31 16:26:22 -0700850 t = find_lock_task_mm(p);
851 if (!t)
852 continue;
853 cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
854 task_unlock(t);
855 }
856 rcu_read_unlock();
857}
858
Kirill Tkhaib728ca02014-06-25 12:19:55 +0400859static inline void check_for_tasks(int dead_cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700860{
Kirill Tkhaib728ca02014-06-25 12:19:55 +0400861 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700862
Oleg Nesterova75a6062015-09-10 15:07:50 +0200863 read_lock(&tasklist_lock);
864 for_each_process_thread(g, p) {
Kirill Tkhaib728ca02014-06-25 12:19:55 +0400865 if (!p->on_rq)
866 continue;
867 /*
868 * We do the check with unlocked task_rq(p)->lock.
869 * Order the reading to do not warn about a task,
870 * which was running on this cpu in the past, and
871 * it's just been woken on another cpu.
872 */
873 rmb();
874 if (task_cpu(p) != dead_cpu)
875 continue;
876
877 pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
878 p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
Oleg Nesterova75a6062015-09-10 15:07:50 +0200879 }
880 read_unlock(&tasklist_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700881}
882
Thomas Gleixner98458172016-02-26 18:43:25 +0000883static int notify_down_prepare(unsigned int cpu)
884{
885 int err, nr_calls = 0;
886
887 err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
888 if (err) {
889 nr_calls--;
890 __cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
891 pr_warn("%s: attempt to take down CPU %u failed\n",
892 __func__, cpu);
893 }
894 return err;
895}
896
Linus Torvalds1da177e2005-04-16 15:20:36 -0700897/* Take this CPU down. */
Mathias Krause71cf5ae2015-07-19 20:06:22 +0200898static int take_cpu_down(void *_param)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700899{
Thomas Gleixner4baa0af2016-02-26 18:43:29 +0000900 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
901 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
Thomas Gleixner090e77c2016-02-26 18:43:23 +0000902 int err, cpu = smp_processor_id();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700903
Linus Torvalds1da177e2005-04-16 15:20:36 -0700904 /* Ensure this CPU doesn't handle any more interrupts. */
905 err = __cpu_disable();
906 if (err < 0)
Zwane Mwaikambof3705132005-06-25 14:54:50 -0700907 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700908
Thomas Gleixnera7246322016-08-12 19:49:38 +0200909 /*
910 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
911 * do this step again.
912 */
913 WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
914 st->state--;
Thomas Gleixner4baa0af2016-02-26 18:43:29 +0000915 /* Invoke the former CPU_DYING callbacks */
Thomas Gleixnera7246322016-08-12 19:49:38 +0200916 for (; st->state > target; st->state--)
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200917 cpuhp_invoke_callback(cpu, st->state, false, NULL);
Thomas Gleixner4baa0af2016-02-26 18:43:29 +0000918
Thomas Gleixner52c063d2015-04-03 02:37:24 +0200919 /* Give up timekeeping duties */
920 tick_handover_do_timer();
Thomas Gleixner14e568e2013-01-31 12:11:14 +0000921 /* Park the stopper thread */
Thomas Gleixner090e77c2016-02-26 18:43:23 +0000922 stop_machine_park(cpu);
Zwane Mwaikambof3705132005-06-25 14:54:50 -0700923 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700924}
925
Thomas Gleixner98458172016-02-26 18:43:25 +0000926static int takedown_cpu(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700927{
Thomas Gleixnere69aab12016-02-26 18:43:43 +0000928 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
Thomas Gleixner98458172016-02-26 18:43:25 +0000929 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700930
Thomas Gleixner2a58c522016-03-10 20:42:08 +0100931 /* Park the smpboot threads */
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000932 kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
933
Peter Zijlstra6acce3e2013-10-11 14:38:20 +0200934 /*
Thomas Gleixnera8994182015-07-05 17:12:30 +0000935 * Prevent irq alloc/free while the dying cpu reorganizes the
936 * interrupt affinities.
937 */
938 irq_lock_sparse();
939
940 /*
Peter Zijlstra6acce3e2013-10-11 14:38:20 +0200941 * So now all preempt/rcu users must observe !cpu_active().
942 */
Thomas Gleixner090e77c2016-02-26 18:43:23 +0000943 err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
Rusty Russell04321582008-07-28 12:16:29 -0500944 if (err) {
Sebastian Andrzej Siewior3b9d6da2016-04-08 14:40:15 +0200945 /* CPU refused to die */
Thomas Gleixnera8994182015-07-05 17:12:30 +0000946 irq_unlock_sparse();
Sebastian Andrzej Siewior3b9d6da2016-04-08 14:40:15 +0200947 /* Unpark the hotplug thread so we can rollback there */
948 kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
Thomas Gleixner98458172016-02-26 18:43:25 +0000949 return err;
Satoru Takeuchi8fa1d7d2006-10-28 10:38:57 -0700950 }
Rusty Russell04321582008-07-28 12:16:29 -0500951 BUG_ON(cpu_online(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700952
Peter Zijlstra48c5cca2010-11-13 19:32:29 +0100953 /*
Thomas Gleixneree1e7142016-08-18 14:57:16 +0200954 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
Peter Zijlstra48c5cca2010-11-13 19:32:29 +0100955 * runnable tasks from the cpu, there's only the idle task left now
956 * that the migration thread is done doing the stop_machine thing.
Peter Zijlstra51a96c72010-11-19 20:37:53 +0100957 *
958 * Wait for the stop thread to go away.
Peter Zijlstra48c5cca2010-11-13 19:32:29 +0100959 */
Thomas Gleixnere69aab12016-02-26 18:43:43 +0000960 wait_for_completion(&st->done);
961 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700962
Thomas Gleixnera8994182015-07-05 17:12:30 +0000963 /* Interrupts are moved away from the dying cpu, reenable alloc/free */
964 irq_unlock_sparse();
965
Preeti U Murthy345527b2015-03-30 14:59:19 +0530966 hotplug_cpu__broadcast_tick_pull(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700967 /* This actually kills the CPU. */
968 __cpu_die(cpu);
969
Thomas Gleixnera49b1162015-04-03 02:38:05 +0200970 tick_cleanup_dead_cpu(cpu);
Thomas Gleixner98458172016-02-26 18:43:25 +0000971 return 0;
972}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700973
Thomas Gleixner98458172016-02-26 18:43:25 +0000974static int notify_dead(unsigned int cpu)
975{
976 cpu_notify_nofail(CPU_DEAD, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700977 check_for_tasks(cpu);
Thomas Gleixner98458172016-02-26 18:43:25 +0000978 return 0;
979}
980
Thomas Gleixner71f87b22016-03-03 10:52:10 +0100981static void cpuhp_complete_idle_dead(void *arg)
982{
983 struct cpuhp_cpu_state *st = arg;
984
985 complete(&st->done);
986}
987
Thomas Gleixnere69aab12016-02-26 18:43:43 +0000988void cpuhp_report_idle_dead(void)
989{
990 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
991
992 BUG_ON(st->state != CPUHP_AP_OFFLINE);
Thomas Gleixner27d50c72016-02-26 18:43:44 +0000993 rcu_report_dead(smp_processor_id());
Thomas Gleixner71f87b22016-03-03 10:52:10 +0100994 st->state = CPUHP_AP_IDLE_DEAD;
995 /*
996 * We cannot call complete after rcu_report_dead() so we delegate it
997 * to an online cpu.
998 */
999 smp_call_function_single(cpumask_first(cpu_online_mask),
1000 cpuhp_complete_idle_dead, st, 0);
Thomas Gleixnere69aab12016-02-26 18:43:43 +00001001}
1002
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001003#else
1004#define notify_down_prepare NULL
1005#define takedown_cpu NULL
1006#define notify_dead NULL
1007#endif
1008
1009#ifdef CONFIG_HOTPLUG_CPU
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001010
Thomas Gleixner98458172016-02-26 18:43:25 +00001011/* Requires cpu_add_remove_lock to be held */
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001012static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
1013 enum cpuhp_state target)
Thomas Gleixner98458172016-02-26 18:43:25 +00001014{
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001015 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1016 int prev_state, ret = 0;
1017 bool hasdied = false;
Thomas Gleixner98458172016-02-26 18:43:25 +00001018
1019 if (num_online_cpus() == 1)
1020 return -EBUSY;
1021
Thomas Gleixner757c9892016-02-26 18:43:32 +00001022 if (!cpu_present(cpu))
Thomas Gleixner98458172016-02-26 18:43:25 +00001023 return -EINVAL;
1024
1025 cpu_hotplug_begin();
1026
1027 cpuhp_tasks_frozen = tasks_frozen;
1028
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001029 prev_state = st->state;
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001030 st->target = target;
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001031 /*
1032 * If the current CPU state is in the range of the AP hotplug thread,
1033 * then we need to kick the thread.
1034 */
Thomas Gleixner8df3e072016-02-26 18:43:41 +00001035 if (st->state > CPUHP_TEARDOWN_CPU) {
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001036 ret = cpuhp_kick_ap_work(cpu);
1037 /*
1038 * The AP side has done the error rollback already. Just
1039 * return the error code..
1040 */
1041 if (ret)
1042 goto out;
1043
1044 /*
1045 * We might have stopped still in the range of the AP hotplug
1046 * thread. Nothing to do anymore.
1047 */
Thomas Gleixner8df3e072016-02-26 18:43:41 +00001048 if (st->state > CPUHP_TEARDOWN_CPU)
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001049 goto out;
1050 }
1051 /*
Thomas Gleixner8df3e072016-02-26 18:43:41 +00001052 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001053 * to do the further cleanups.
1054 */
Thomas Gleixnera7246322016-08-12 19:49:38 +02001055 ret = cpuhp_down_callbacks(cpu, st, target);
Sebastian Andrzej Siewior3b9d6da2016-04-08 14:40:15 +02001056 if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
1057 st->target = prev_state;
1058 st->rollback = true;
1059 cpuhp_kick_ap_work(cpu);
1060 }
Thomas Gleixner98458172016-02-26 18:43:25 +00001061
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001062 hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001063out:
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001064 cpu_hotplug_done();
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001065 /* This post dead nonsense must die */
1066 if (!ret && hasdied)
Thomas Gleixner090e77c2016-02-26 18:43:23 +00001067 cpu_notify_nofail(CPU_POST_DEAD, cpu);
Thomas Gleixnera3c901b2018-11-25 19:33:39 +01001068 arch_smt_update();
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001069 return ret;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001070}
1071
Thomas Gleixner373b8de2018-05-29 17:49:05 +02001072static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
1073{
1074 if (cpu_hotplug_disabled)
1075 return -EBUSY;
1076 return _cpu_down(cpu, 0, target);
1077}
1078
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001079static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001080{
Heiko Carstens9ea09af2008-12-22 12:36:30 +01001081 int err;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001082
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001083 cpu_maps_update_begin();
Thomas Gleixner373b8de2018-05-29 17:49:05 +02001084 err = cpu_down_maps_locked(cpu, target);
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001085 cpu_maps_update_done();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001086 return err;
1087}
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001088int cpu_down(unsigned int cpu)
1089{
1090 return do_cpu_down(cpu, CPUHP_OFFLINE);
1091}
Zhang Ruib62b8ef2008-04-29 02:35:56 -04001092EXPORT_SYMBOL(cpu_down);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001093#endif /*CONFIG_HOTPLUG_CPU*/
1094
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001095/**
Thomas Gleixneree1e7142016-08-18 14:57:16 +02001096 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001097 * @cpu: cpu that just started
1098 *
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001099 * It must be called by the arch code on the new cpu, before the new cpu
1100 * enables interrupts and before the "boot" cpu returns from __cpu_up().
1101 */
1102void notify_cpu_starting(unsigned int cpu)
1103{
1104 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1105 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
1106
Sebastian Andrzej Siewior0c6d4572016-08-17 14:21:04 +02001107 rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
Thomas Gleixner8438e492018-06-29 16:05:48 +02001108 st->booted_once = true;
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001109 while (st->state < target) {
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001110 st->state++;
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001111 cpuhp_invoke_callback(cpu, st->state, true, NULL);
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001112 }
1113}
1114
Thomas Gleixner949338e2016-02-26 18:43:35 +00001115/*
Thomas Gleixner7b4e4b12017-07-04 22:20:23 +02001116 * Called from the idle task. Wake up the controlling task which brings the
Peter Zijlstraa594a9e2019-12-10 09:34:54 +01001117 * hotplug thread of the upcoming CPU up and then delegates the rest of the
1118 * online bringup to the hotplug thread.
Thomas Gleixner949338e2016-02-26 18:43:35 +00001119 */
Thomas Gleixner8df3e072016-02-26 18:43:41 +00001120void cpuhp_online_idle(enum cpuhp_state state)
Thomas Gleixner949338e2016-02-26 18:43:35 +00001121{
Thomas Gleixner8df3e072016-02-26 18:43:41 +00001122 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
Thomas Gleixner8df3e072016-02-26 18:43:41 +00001123
1124 /* Happens for the boot cpu */
1125 if (state != CPUHP_AP_ONLINE_IDLE)
1126 return;
1127
Peter Zijlstraa594a9e2019-12-10 09:34:54 +01001128 /*
1129 * Unpart the stopper thread before we start the idle loop (and start
1130 * scheduling); this ensures the stopper task is always available.
1131 */
1132 stop_machine_unpark(smp_processor_id());
1133
Thomas Gleixner8df3e072016-02-26 18:43:41 +00001134 st->state = CPUHP_AP_ONLINE_IDLE;
Thomas Gleixner7b4e4b12017-07-04 22:20:23 +02001135 complete(&st->done);
Thomas Gleixner949338e2016-02-26 18:43:35 +00001136}
1137
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001138/* Requires cpu_add_remove_lock to be held */
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001139static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001140{
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001141 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
Suresh Siddha3bb5d2e2012-04-20 17:08:50 -07001142 struct task_struct *idle;
Thomas Gleixner2e1a3482016-02-26 18:43:37 +00001143 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001145 cpu_hotplug_begin();
Thomas Gleixner38498a62012-04-20 13:05:44 +00001146
Thomas Gleixner757c9892016-02-26 18:43:32 +00001147 if (!cpu_present(cpu)) {
Yasuaki Ishimatsu5e5041f2012-10-23 01:30:54 +02001148 ret = -EINVAL;
1149 goto out;
1150 }
1151
Thomas Gleixner757c9892016-02-26 18:43:32 +00001152 /*
1153 * The caller of do_cpu_up might have raced with another
1154 * caller. Ignore it for now.
1155 */
1156 if (st->state >= target)
Thomas Gleixner38498a62012-04-20 13:05:44 +00001157 goto out;
Thomas Gleixner757c9892016-02-26 18:43:32 +00001158
1159 if (st->state == CPUHP_OFFLINE) {
1160 /* Let it fail before we try to bring the cpu up */
1161 idle = idle_thread_get(cpu);
1162 if (IS_ERR(idle)) {
1163 ret = PTR_ERR(idle);
1164 goto out;
1165 }
Suresh Siddha3bb5d2e2012-04-20 17:08:50 -07001166 }
Thomas Gleixner38498a62012-04-20 13:05:44 +00001167
Thomas Gleixnerba997462016-02-26 18:43:24 +00001168 cpuhp_tasks_frozen = tasks_frozen;
1169
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001170 st->target = target;
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001171 /*
1172 * If the current CPU state is in the range of the AP hotplug thread,
1173 * then we need to kick the thread once more.
1174 */
Thomas Gleixner8df3e072016-02-26 18:43:41 +00001175 if (st->state > CPUHP_BRINGUP_CPU) {
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001176 ret = cpuhp_kick_ap_work(cpu);
1177 /*
1178 * The AP side has done the error rollback already. Just
1179 * return the error code..
1180 */
1181 if (ret)
1182 goto out;
1183 }
1184
1185 /*
1186 * Try to reach the target state. We max out on the BP at
Thomas Gleixner8df3e072016-02-26 18:43:41 +00001187 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001188 * responsible for bringing it up to the target state.
1189 */
Thomas Gleixner8df3e072016-02-26 18:43:41 +00001190 target = min((int)target, CPUHP_BRINGUP_CPU);
Thomas Gleixnera7246322016-08-12 19:49:38 +02001191 ret = cpuhp_up_callbacks(cpu, st, target);
Thomas Gleixner38498a62012-04-20 13:05:44 +00001192out:
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001193 cpu_hotplug_done();
Thomas Gleixnera3c901b2018-11-25 19:33:39 +01001194 arch_smt_update();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195 return ret;
1196}
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001197
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001198static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001199{
1200 int err = 0;
minskey guocf234222010-05-24 14:32:41 -07001201
Rusty Russelle0b582e2009-01-01 10:12:28 +10301202 if (!cpu_possible(cpu)) {
Fabian Frederick84117da2014-06-04 16:11:17 -07001203 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1204 cpu);
Chen Gong87d5e022010-03-05 13:42:38 -08001205#if defined(CONFIG_IA64)
Fabian Frederick84117da2014-06-04 16:11:17 -07001206 pr_err("please check additional_cpus= boot parameter\n");
KAMEZAWA Hiroyuki73e753a2007-10-18 23:40:47 -07001207#endif
1208 return -EINVAL;
1209 }
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001210
Toshi Kani01b0f192013-11-12 15:07:25 -08001211 err = try_online_node(cpu_to_node(cpu));
1212 if (err)
1213 return err;
minskey guocf234222010-05-24 14:32:41 -07001214
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001215 cpu_maps_update_begin();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001216
Max Krasnyanskye761b772008-07-15 04:43:49 -07001217 if (cpu_hotplug_disabled) {
1218 err = -EBUSY;
1219 goto out;
1220 }
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02001221 if (!cpu_smt_allowed(cpu)) {
1222 err = -EPERM;
1223 goto out;
1224 }
Max Krasnyanskye761b772008-07-15 04:43:49 -07001225
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001226 err = _cpu_up(cpu, 0, target);
Max Krasnyanskye761b772008-07-15 04:43:49 -07001227out:
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001228 cpu_maps_update_done();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001229 return err;
1230}
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001231
1232int cpu_up(unsigned int cpu)
1233{
1234 return do_cpu_up(cpu, CPUHP_ONLINE);
1235}
Paul E. McKenneya513f6b2011-12-11 21:54:45 -08001236EXPORT_SYMBOL_GPL(cpu_up);
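/*
 * Usage sketch (illustrative, not part of this file): cpu_up() drives a
 * present CPU all the way to CPUHP_ONLINE. The early SMP bringup loop in
 * kernel/smp.c does roughly the following; @max_cpus would come from the
 * "maxcpus=" command line handling.
 */
static void __init bring_up_secondary_cpus_example(unsigned int max_cpus)
{
	unsigned int cpu;

	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}
}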
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001237
Rafael J. Wysockif3de4be2007-08-30 23:56:29 -07001238#ifdef CONFIG_PM_SLEEP_SMP
Rusty Russelle0b582e2009-01-01 10:12:28 +10301239static cpumask_var_t frozen_cpus;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001240
James Morsed391e552016-08-17 13:50:25 +01001241int freeze_secondary_cpus(int primary)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001242{
James Morsed391e552016-08-17 13:50:25 +01001243 int cpu, error = 0;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001244
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001245 cpu_maps_update_begin();
James Morsed391e552016-08-17 13:50:25 +01001246 if (!cpu_online(primary))
1247 primary = cpumask_first(cpu_online_mask);
Xiaotian Feng9ee349a2009-12-16 18:04:32 +01001248 /*
1249 * We take down all of the non-boot CPUs in one shot to avoid races
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001250 * with the userspace trying to use the CPU hotplug at the same time
1251 */
Rusty Russelle0b582e2009-01-01 10:12:28 +10301252 cpumask_clear(frozen_cpus);
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01001253
Fabian Frederick84117da2014-06-04 16:11:17 -07001254 pr_info("Disabling non-boot CPUs ...\n");
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001255 for_each_online_cpu(cpu) {
James Morsed391e552016-08-17 13:50:25 +01001256 if (cpu == primary)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001257 continue;
Todd E Brandtbb3632c2014-06-06 05:40:17 -07001258 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001259 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
Todd E Brandtbb3632c2014-06-06 05:40:17 -07001260 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
Mike Travisfeae3202009-11-17 18:22:13 -06001261 if (!error)
Rusty Russelle0b582e2009-01-01 10:12:28 +10301262 cpumask_set_cpu(cpu, frozen_cpus);
Mike Travisfeae3202009-11-17 18:22:13 -06001263 else {
Fabian Frederick84117da2014-06-04 16:11:17 -07001264 pr_err("Error taking CPU%d down: %d\n", cpu, error);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001265 break;
1266 }
1267 }
Joseph Cihula86886e52009-06-30 19:31:07 -07001268
Vitaly Kuznetsov89af7ba2015-08-05 00:52:46 -07001269 if (!error)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001270 BUG_ON(num_online_cpus() > 1);
Vitaly Kuznetsov89af7ba2015-08-05 00:52:46 -07001271 else
Fabian Frederick84117da2014-06-04 16:11:17 -07001272 pr_err("Non-boot CPUs are not disabled\n");
Vitaly Kuznetsov89af7ba2015-08-05 00:52:46 -07001273
1274 /*
1275 * Make sure the CPUs won't be enabled by someone else. We need to do
1276 * this even in case of failure as all disable_nonboot_cpus() users are
1277 * supposed to do enable_nonboot_cpus() on the failure path.
1278 */
1279 cpu_hotplug_disabled++;
1280
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001281 cpu_maps_update_done();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001282 return error;
1283}
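/*
 * Caller sketch (illustrative): the suspend/hibernate core does not call
 * freeze_secondary_cpus() directly but goes through the
 * disable_nonboot_cpus() wrapper, which in this kernel's <linux/cpu.h>
 * roughly boils down to freezing everything except CPU 0:
 *
 *	static inline int disable_nonboot_cpus(void)
 *	{
 *		return freeze_secondary_cpus(0);
 *	}
 */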
1284
Suresh Siddhad0af9ee2009-08-19 18:05:36 -07001285void __weak arch_enable_nonboot_cpus_begin(void)
1286{
1287}
1288
1289void __weak arch_enable_nonboot_cpus_end(void)
1290{
1291}
1292
Mathias Krause71cf5ae2015-07-19 20:06:22 +02001293void enable_nonboot_cpus(void)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001294{
1295 int cpu, error;
1296
1297 /* Allow everyone to use the CPU hotplug again */
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001298 cpu_maps_update_begin();
Lianwei Wang01b41152016-06-09 23:43:28 -07001299 __cpu_hotplug_enable();
Rusty Russelle0b582e2009-01-01 10:12:28 +10301300 if (cpumask_empty(frozen_cpus))
Rafael J. Wysocki1d64b9c2007-04-01 23:49:49 -07001301 goto out;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001302
Fabian Frederick84117da2014-06-04 16:11:17 -07001303 pr_info("Enabling non-boot CPUs ...\n");
Suresh Siddhad0af9ee2009-08-19 18:05:36 -07001304
1305 arch_enable_nonboot_cpus_begin();
1306
Rusty Russelle0b582e2009-01-01 10:12:28 +10301307 for_each_cpu(cpu, frozen_cpus) {
Todd E Brandtbb3632c2014-06-06 05:40:17 -07001308 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001309 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
Todd E Brandtbb3632c2014-06-06 05:40:17 -07001310 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001311 if (!error) {
Fabian Frederick84117da2014-06-04 16:11:17 -07001312 pr_info("CPU%d is up\n", cpu);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001313 continue;
1314 }
Fabian Frederick84117da2014-06-04 16:11:17 -07001315 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001316 }
Suresh Siddhad0af9ee2009-08-19 18:05:36 -07001317
1318 arch_enable_nonboot_cpus_end();
1319
Rusty Russelle0b582e2009-01-01 10:12:28 +10301320 cpumask_clear(frozen_cpus);
Rafael J. Wysocki1d64b9c2007-04-01 23:49:49 -07001321out:
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001322 cpu_maps_update_done();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001323}
Rusty Russelle0b582e2009-01-01 10:12:28 +10301324
Fenghua Yud7268a32011-11-15 21:59:31 +01001325static int __init alloc_frozen_cpus(void)
Rusty Russelle0b582e2009-01-01 10:12:28 +10301326{
1327 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1328 return -ENOMEM;
1329 return 0;
1330}
1331core_initcall(alloc_frozen_cpus);
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001332
1333/*
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001334 * When callbacks for CPU hotplug notifications are being executed, we must
1335 * ensure that the state of the system with respect to the tasks being frozen
1336 * or not, as reported by the notification, remains unchanged *throughout the
1337 * duration* of the execution of the callbacks.
1338 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1339 *
1340 * This synchronization is implemented by mutually excluding regular CPU
1341 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1342 * Hibernate notifications.
1343 */
1344static int
1345cpu_hotplug_pm_callback(struct notifier_block *nb,
1346 unsigned long action, void *ptr)
1347{
1348 switch (action) {
1349
1350 case PM_SUSPEND_PREPARE:
1351 case PM_HIBERNATION_PREPARE:
Srivatsa S. Bhat16e53db2013-06-12 14:04:36 -07001352 cpu_hotplug_disable();
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001353 break;
1354
1355 case PM_POST_SUSPEND:
1356 case PM_POST_HIBERNATION:
Srivatsa S. Bhat16e53db2013-06-12 14:04:36 -07001357 cpu_hotplug_enable();
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001358 break;
1359
1360 default:
1361 return NOTIFY_DONE;
1362 }
1363
1364 return NOTIFY_OK;
1365}
1366
1367
Fenghua Yud7268a32011-11-15 21:59:31 +01001368static int __init cpu_hotplug_pm_sync_init(void)
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001369{
Fenghua Yu6e32d472012-11-13 11:32:43 -08001370 /*
 1371	 * cpu_hotplug_pm_callback has higher priority than x86's
 1372	 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
 1373	 * disabling cpu hotplug first to avoid a cpu hotplug race.
1374 */
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001375 pm_notifier(cpu_hotplug_pm_callback, 0);
1376 return 0;
1377}
1378core_initcall(cpu_hotplug_pm_sync_init);
1379
Rafael J. Wysockif3de4be2007-08-30 23:56:29 -07001380#endif /* CONFIG_PM_SLEEP_SMP */
Max Krasnyansky68f4f1e2008-05-29 11:17:02 -07001381
1382#endif /* CONFIG_SMP */
Mike Travisb8d317d2008-07-24 18:21:29 -07001383
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001384/* Boot processor state steps */
1385static struct cpuhp_step cpuhp_bp_states[] = {
1386 [CPUHP_OFFLINE] = {
1387 .name = "offline",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001388 .startup.single = NULL,
1389 .teardown.single = NULL,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001390 },
1391#ifdef CONFIG_SMP
 1392	[CPUHP_CREATE_THREADS] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001393 .name = "threads:prepare",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001394 .startup.single = smpboot_create_threads,
1395 .teardown.single = NULL,
Thomas Gleixner757c9892016-02-26 18:43:32 +00001396 .cant_stop = true,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001397 },
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001398 [CPUHP_PERF_PREPARE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001399 .name = "perf:prepare",
1400 .startup.single = perf_event_init_cpu,
1401 .teardown.single = perf_event_exit_cpu,
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001402 },
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001403 [CPUHP_WORKQUEUE_PREP] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001404 .name = "workqueue:prepare",
1405 .startup.single = workqueue_prepare_cpu,
1406 .teardown.single = NULL,
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001407 },
Thomas Gleixner27590dc2016-07-15 10:41:04 +02001408 [CPUHP_HRTIMERS_PREPARE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001409 .name = "hrtimers:prepare",
1410 .startup.single = hrtimers_prepare_cpu,
1411 .teardown.single = hrtimers_dead_cpu,
Thomas Gleixner27590dc2016-07-15 10:41:04 +02001412 },
Richard Weinberger31487f82016-07-13 17:17:01 +00001413 [CPUHP_SMPCFD_PREPARE] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001414 .name = "smpcfd:prepare",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001415 .startup.single = smpcfd_prepare_cpu,
1416 .teardown.single = smpcfd_dead_cpu,
Richard Weinberger31487f82016-07-13 17:17:01 +00001417 },
Richard Weinbergere6d49892016-08-18 14:57:17 +02001418 [CPUHP_RELAY_PREPARE] = {
1419 .name = "relay:prepare",
1420 .startup.single = relay_prepare_cpu,
1421 .teardown.single = NULL,
1422 },
Sebastian Andrzej Siewior6731d4f2016-08-23 14:53:19 +02001423 [CPUHP_SLAB_PREPARE] = {
1424 .name = "slab:prepare",
1425 .startup.single = slab_prepare_cpu,
1426 .teardown.single = slab_dead_cpu,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001427 },
Thomas Gleixner4df83742016-07-13 17:17:03 +00001428 [CPUHP_RCUTREE_PREP] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001429 .name = "RCU/tree:prepare",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001430 .startup.single = rcutree_prepare_cpu,
1431 .teardown.single = rcutree_dead_cpu,
Thomas Gleixner4df83742016-07-13 17:17:03 +00001432 },
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001433 /*
1434 * Preparatory and dead notifiers. Will be replaced once the notifiers
1435 * are converted to states.
1436 */
1437 [CPUHP_NOTIFY_PREPARE] = {
1438 .name = "notify:prepare",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001439 .startup.single = notify_prepare,
1440 .teardown.single = notify_dead,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001441 .skip_onerr = true,
Thomas Gleixner757c9892016-02-26 18:43:32 +00001442 .cant_stop = true,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001443 },
Richard Cochran4fae16d2016-07-27 11:08:18 +02001444 /*
1445 * On the tear-down path, timers_dead_cpu() must be invoked
1446 * before blk_mq_queue_reinit_notify() from notify_dead(),
 1447	 * otherwise an RCU stall occurs.
1448 */
Thomas Gleixner249d4a92017-12-27 21:37:25 +01001449 [CPUHP_TIMERS_PREPARE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001450 .name = "timers:dead",
Thomas Gleixner249d4a92017-12-27 21:37:25 +01001451 .startup.single = timers_prepare_cpu,
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001452 .teardown.single = timers_dead_cpu,
Richard Cochran4fae16d2016-07-27 11:08:18 +02001453 },
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001454 /* Kicks the plugged cpu into life */
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001455 [CPUHP_BRINGUP_CPU] = {
1456 .name = "cpu:bringup",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001457 .startup.single = bringup_cpu,
1458 .teardown.single = NULL,
Thomas Gleixner757c9892016-02-26 18:43:32 +00001459 .cant_stop = true,
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001460 },
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001461 /*
 1462	 * Handled on the control processor until the plugged processor manages
1463 * this itself.
1464 */
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001465 [CPUHP_TEARDOWN_CPU] = {
1466 .name = "cpu:teardown",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001467 .startup.single = NULL,
1468 .teardown.single = takedown_cpu,
Thomas Gleixner757c9892016-02-26 18:43:32 +00001469 .cant_stop = true,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001470 },
Thomas Gleixnera7c734142016-07-12 21:59:23 +02001471#else
1472 [CPUHP_BRINGUP_CPU] = { },
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001473#endif
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001474};
1475
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001476/* Application processor state steps */
1477static struct cpuhp_step cpuhp_ap_states[] = {
1478#ifdef CONFIG_SMP
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001479 /* Final state before CPU kills itself */
1480 [CPUHP_AP_IDLE_DEAD] = {
1481 .name = "idle:dead",
1482 },
1483 /*
1484 * Last state before CPU enters the idle loop to die. Transient state
1485 * for synchronization.
1486 */
1487 [CPUHP_AP_OFFLINE] = {
1488 .name = "ap:offline",
1489 .cant_stop = true,
1490 },
Thomas Gleixner9cf72432016-03-10 12:54:09 +01001491 /* First state is scheduler control. Interrupts are disabled */
1492 [CPUHP_AP_SCHED_STARTING] = {
1493 .name = "sched:starting",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001494 .startup.single = sched_cpu_starting,
1495 .teardown.single = sched_cpu_dying,
Thomas Gleixner9cf72432016-03-10 12:54:09 +01001496 },
Thomas Gleixner4df83742016-07-13 17:17:03 +00001497 [CPUHP_AP_RCUTREE_DYING] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001498 .name = "RCU/tree:dying",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001499 .startup.single = NULL,
1500 .teardown.single = rcutree_dying_cpu,
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001501 },
Lai Jiangshanff3d4fd2017-11-28 21:19:53 +08001502 [CPUHP_AP_SMPCFD_DYING] = {
1503 .name = "smpcfd:dying",
1504 .startup.single = NULL,
1505 .teardown.single = smpcfd_dying_cpu,
1506 },
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001507 /* Entry state on starting. Interrupts enabled from here on. Transient
 1508	 * state for synchronization */
1509 [CPUHP_AP_ONLINE] = {
1510 .name = "ap:online",
1511 },
1512 /* Handle smpboot threads park/unpark */
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001513 [CPUHP_AP_SMPBOOT_THREADS] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001514 .name = "smpboot/threads:online",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001515 .startup.single = smpboot_unpark_threads,
Thomas Gleixner93335752018-05-29 19:05:25 +02001516 .teardown.single = smpboot_park_threads,
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001517 },
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001518 [CPUHP_AP_PERF_ONLINE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001519 .name = "perf:online",
1520 .startup.single = perf_event_init_cpu,
1521 .teardown.single = perf_event_exit_cpu,
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001522 },
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001523 [CPUHP_AP_WORKQUEUE_ONLINE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001524 .name = "workqueue:online",
1525 .startup.single = workqueue_online_cpu,
1526 .teardown.single = workqueue_offline_cpu,
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001527 },
Thomas Gleixner4df83742016-07-13 17:17:03 +00001528 [CPUHP_AP_RCUTREE_ONLINE] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001529 .name = "RCU/tree:online",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001530 .startup.single = rcutree_online_cpu,
1531 .teardown.single = rcutree_offline_cpu,
Thomas Gleixner4df83742016-07-13 17:17:03 +00001532 },
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001533
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001534 /*
1535 * Online/down_prepare notifiers. Will be removed once the notifiers
1536 * are converted to states.
1537 */
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001538 [CPUHP_AP_NOTIFY_ONLINE] = {
1539 .name = "notify:online",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001540 .startup.single = notify_online,
1541 .teardown.single = notify_down_prepare,
Sebastian Andrzej Siewior3b9d6da2016-04-08 14:40:15 +02001542 .skip_onerr = true,
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001543 },
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001544#endif
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001545 /*
1546 * The dynamically registered state space is here
1547 */
1548
Thomas Gleixneraaddd7d2016-03-10 12:54:19 +01001549#ifdef CONFIG_SMP
1550 /* Last state is scheduler control setting the cpu active */
1551 [CPUHP_AP_ACTIVE] = {
1552 .name = "sched:active",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001553 .startup.single = sched_cpu_activate,
1554 .teardown.single = sched_cpu_deactivate,
Thomas Gleixneraaddd7d2016-03-10 12:54:19 +01001555 },
1556#endif
1557
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001558 /* CPU is fully up and running. */
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001559 [CPUHP_ONLINE] = {
1560 .name = "online",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001561 .startup.single = NULL,
1562 .teardown.single = NULL,
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001563 },
1564};
1565
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001566/* Sanity check for callbacks */
1567static int cpuhp_cb_check(enum cpuhp_state state)
1568{
1569 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1570 return -EINVAL;
1571 return 0;
1572}
1573
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001574static void cpuhp_store_callbacks(enum cpuhp_state state,
1575 const char *name,
1576 int (*startup)(unsigned int cpu),
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001577 int (*teardown)(unsigned int cpu),
1578 bool multi_instance)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001579{
1580 /* (Un)Install the callbacks for further cpu hotplug operations */
1581 struct cpuhp_step *sp;
1582
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001583 sp = cpuhp_get_step(state);
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001584 sp->startup.single = startup;
1585 sp->teardown.single = teardown;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001586 sp->name = name;
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001587 sp->multi_instance = multi_instance;
1588 INIT_HLIST_HEAD(&sp->list);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001589}
1590
1591static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1592{
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001593 return cpuhp_get_step(state)->teardown.single;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001594}
1595
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001596/*
1597 * Call the startup/teardown function for a step either on the AP or
1598 * on the current CPU.
1599 */
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001600static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1601 struct hlist_node *node)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001602{
Thomas Gleixnera7246322016-08-12 19:49:38 +02001603 struct cpuhp_step *sp = cpuhp_get_step(state);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001604 int ret;
1605
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001606 if ((bringup && !sp->startup.single) ||
1607 (!bringup && !sp->teardown.single))
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001608 return 0;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001609 /*
 1610	 * The non-AP-bound callbacks can fail on bringup. On teardown,
 1611	 * e.g. module removal, we crash for now.
1612 */
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001613#ifdef CONFIG_SMP
1614 if (cpuhp_is_ap_state(state))
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001615 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001616 else
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001617 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001618#else
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001619 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001620#endif
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001621 BUG_ON(ret && !bringup);
1622 return ret;
1623}
1624
1625/*
1626 * Called from __cpuhp_setup_state on a recoverable failure.
1627 *
1628 * Note: The teardown callbacks for rollback are not allowed to fail!
1629 */
1630static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001631 struct hlist_node *node)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001632{
1633 int cpu;
1634
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001635 /* Roll back the already executed steps on the other cpus */
1636 for_each_present_cpu(cpu) {
1637 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1638 int cpustate = st->state;
1639
1640 if (cpu >= failedcpu)
1641 break;
1642
1643 /* Did we invoke the startup call on that cpu ? */
1644 if (cpustate >= state)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001645 cpuhp_issue_call(cpu, state, false, node);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001646 }
1647}
1648
1649/*
 1650 * Returns a free slot for dynamic state assignment in the online state space.
 1651 * The slots are protected by the cpuhp_state_mutex and an empty slot is identified
1652 * by having no name assigned.
1653 */
1654static int cpuhp_reserve_state(enum cpuhp_state state)
1655{
1656 enum cpuhp_state i;
1657
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001658 for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
1659 if (cpuhp_ap_states[i].name)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001660 continue;
1661
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001662 cpuhp_ap_states[i].name = "Reserved";
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001663 return i;
1664 }
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001665 WARN(1, "No more dynamic states available for CPU hotplug\n");
1666 return -ENOSPC;
1667}
1668
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001669int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1670 bool invoke)
1671{
1672 struct cpuhp_step *sp;
1673 int cpu;
1674 int ret;
1675
1676 sp = cpuhp_get_step(state);
1677 if (sp->multi_instance == false)
1678 return -EINVAL;
1679
1680 get_online_cpus();
Sebastian Andrzej Siewior7ad6de42017-03-14 16:06:45 +01001681 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001682
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001683 if (!invoke || !sp->startup.multi)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001684 goto add_node;
1685
1686 /*
1687 * Try to call the startup callback for each present cpu
1688 * depending on the hotplug state of the cpu.
1689 */
1690 for_each_present_cpu(cpu) {
1691 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1692 int cpustate = st->state;
1693
1694 if (cpustate < state)
1695 continue;
1696
1697 ret = cpuhp_issue_call(cpu, state, true, node);
1698 if (ret) {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001699 if (sp->teardown.multi)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001700 cpuhp_rollback_install(cpu, state, node);
1701 goto err;
1702 }
1703 }
1704add_node:
1705 ret = 0;
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001706 hlist_add_head(node, &sp->list);
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001707
1708err:
Sebastian Andrzej Siewior7ad6de42017-03-14 16:06:45 +01001709 mutex_unlock(&cpuhp_state_mutex);
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001710 put_online_cpus();
1711 return ret;
1712}
1713EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
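/*
 * Usage sketch (illustrative, not part of this file): multi-instance states
 * let a single hotplug state carry many instances, each identified by a
 * struct hlist_node embedded in the caller's object. A subsystem would
 * typically use the cpuhp_setup_state_multi()/cpuhp_state_add_instance()
 * wrappers from <linux/cpuhotplug.h>; all "foo" names below are hypothetical.
 */
struct foo_instance {
	struct hlist_node node;
	/* per-instance state would live here */
};

static enum cpuhp_state foo_multi_state;

static int foo_instance_online(unsigned int cpu, struct hlist_node *node)
{
	struct foo_instance *inst = hlist_entry(node, struct foo_instance, node);

	/* bring this instance into operation on @cpu */
	pr_debug("foo: instance %p online on CPU%u\n", inst, cpu);
	return 0;
}

static int foo_instance_offline(unsigned int cpu, struct hlist_node *node)
{
	/* undo foo_instance_online() for this instance on @cpu */
	return 0;
}

static int foo_register_instance(struct foo_instance *inst)
{
	int ret;

	/* First instance sets up the shared multi-instance state */
	if (!foo_multi_state) {
		ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					      "foo/instance:online",
					      foo_instance_online,
					      foo_instance_offline);
		if (ret < 0)
			return ret;
		foo_multi_state = ret;
	}
	/* Runs foo_instance_online() on all CPUs already past the state */
	return cpuhp_state_add_instance(foo_multi_state, &inst->node);
}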
1714
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001715/**
 1716 * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state
 1717 * @state:	The state to setup
 * @name:	Name of the state; shown in debug output and sysfs
 1718 * @invoke:	If true, the startup function is invoked for cpus where
 1719 *		cpu state >= @state
 1720 * @startup:	startup callback function
 1721 * @teardown:	teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards
1722 *
 1723 * Returns 0 on success, the dynamically allocated state number when
 * @state is CPUHP_AP_ONLINE_DYN, or a negative error code on failure.
1724 */
1725int __cpuhp_setup_state(enum cpuhp_state state,
1726 const char *name, bool invoke,
1727 int (*startup)(unsigned int cpu),
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001728 int (*teardown)(unsigned int cpu),
1729 bool multi_instance)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001730{
1731 int cpu, ret = 0;
1732 int dyn_state = 0;
1733
1734 if (cpuhp_cb_check(state) || !name)
1735 return -EINVAL;
1736
1737 get_online_cpus();
Sebastian Andrzej Siewior7ad6de42017-03-14 16:06:45 +01001738 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001739
 1740	/* Dynamic state allocation is currently only supported for CPUHP_AP_ONLINE_DYN */
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001741 if (state == CPUHP_AP_ONLINE_DYN) {
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001742 dyn_state = 1;
1743 ret = cpuhp_reserve_state(state);
1744 if (ret < 0)
1745 goto out;
1746 state = ret;
1747 }
1748
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001749 cpuhp_store_callbacks(state, name, startup, teardown, multi_instance);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001750
1751 if (!invoke || !startup)
1752 goto out;
1753
1754 /*
1755 * Try to call the startup callback for each present cpu
1756 * depending on the hotplug state of the cpu.
1757 */
1758 for_each_present_cpu(cpu) {
1759 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1760 int cpustate = st->state;
1761
1762 if (cpustate < state)
1763 continue;
1764
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001765 ret = cpuhp_issue_call(cpu, state, true, NULL);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001766 if (ret) {
Thomas Gleixnera7246322016-08-12 19:49:38 +02001767 if (teardown)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001768 cpuhp_rollback_install(cpu, state, NULL);
1769 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001770 goto out;
1771 }
1772 }
1773out:
Sebastian Andrzej Siewior7ad6de42017-03-14 16:06:45 +01001774 mutex_unlock(&cpuhp_state_mutex);
1775
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001776 put_online_cpus();
1777 if (!ret && dyn_state)
1778 return state;
1779 return ret;
1780}
1781EXPORT_SYMBOL(__cpuhp_setup_state);
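/*
 * Usage sketch (illustrative, not part of this file): most users go through
 * the cpuhp_setup_state() wrapper from <linux/cpuhotplug.h> and request a
 * dynamically allocated state with CPUHP_AP_ONLINE_DYN. All "foo" names
 * below are hypothetical.
 */
static enum cpuhp_state foo_online_state;

static int foo_cpu_online(unsigned int cpu)
{
	/* allocate/enable the per-CPU resources of the foo subsystem */
	return 0;
}

static int foo_cpu_offline(unsigned int cpu)
{
	/* release the per-CPU resources again */
	return 0;
}

static int __init foo_init_example(void)
{
	int ret;

	/*
	 * The wrapper passes invoke == true, so foo_cpu_online() is called
	 * right here for every CPU that is already online. For
	 * CPUHP_AP_ONLINE_DYN the allocated state number is returned so it
	 * can be handed to cpuhp_remove_state() later.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
				foo_cpu_online, foo_cpu_offline);
	if (ret < 0)
		return ret;
	foo_online_state = ret;
	return 0;
}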
1782
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001783int __cpuhp_state_remove_instance(enum cpuhp_state state,
1784 struct hlist_node *node, bool invoke)
1785{
1786 struct cpuhp_step *sp = cpuhp_get_step(state);
1787 int cpu;
1788
1789 BUG_ON(cpuhp_cb_check(state));
1790
1791 if (!sp->multi_instance)
1792 return -EINVAL;
1793
1794 get_online_cpus();
Sebastian Andrzej Siewior7ad6de42017-03-14 16:06:45 +01001795 mutex_lock(&cpuhp_state_mutex);
1796
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001797 if (!invoke || !cpuhp_get_teardown_cb(state))
1798 goto remove;
1799 /*
1800 * Call the teardown callback for each present cpu depending
1801 * on the hotplug state of the cpu. This function is not
1802 * allowed to fail currently!
1803 */
1804 for_each_present_cpu(cpu) {
1805 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1806 int cpustate = st->state;
1807
1808 if (cpustate >= state)
1809 cpuhp_issue_call(cpu, state, false, node);
1810 }
1811
1812remove:
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001813 hlist_del(node);
1814 mutex_unlock(&cpuhp_state_mutex);
1815 put_online_cpus();
1816
1817 return 0;
1818}
1819EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001820/**
 1821 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
1822 * @state: The state to remove
1823 * @invoke: If true, the teardown function is invoked for cpus where
1824 * cpu state >= @state
1825 *
1826 * The teardown callback is currently not allowed to fail. Think
1827 * about module removal!
1828 */
1829void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1830{
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001831 struct cpuhp_step *sp = cpuhp_get_step(state);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001832 int cpu;
1833
1834 BUG_ON(cpuhp_cb_check(state));
1835
1836 get_online_cpus();
Sebastian Andrzej Siewior7ad6de42017-03-14 16:06:45 +01001837 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001838
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001839 if (sp->multi_instance) {
1840 WARN(!hlist_empty(&sp->list),
1841 "Error: Removing state %d which has instances left.\n",
1842 state);
1843 goto remove;
1844 }
1845
Thomas Gleixnera7246322016-08-12 19:49:38 +02001846 if (!invoke || !cpuhp_get_teardown_cb(state))
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001847 goto remove;
1848
1849 /*
1850 * Call the teardown callback for each present cpu depending
1851 * on the hotplug state of the cpu. This function is not
1852 * allowed to fail currently!
1853 */
1854 for_each_present_cpu(cpu) {
1855 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1856 int cpustate = st->state;
1857
1858 if (cpustate >= state)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001859 cpuhp_issue_call(cpu, state, false, NULL);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001860 }
1861remove:
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001862 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
Sebastian Andrzej Siewior7ad6de42017-03-14 16:06:45 +01001863 mutex_unlock(&cpuhp_state_mutex);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001864 put_online_cpus();
1865}
1866EXPORT_SYMBOL(__cpuhp_remove_state);
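/*
 * Usage sketch (illustrative, continuing the hypothetical "foo" example
 * above): on module unload the dynamically allocated state is dropped via
 * the cpuhp_remove_state() wrapper, which uses invoke == true and therefore
 * runs the teardown callback on every CPU that has passed the state.
 */
static void __exit foo_exit_example(void)
{
	cpuhp_remove_state(foo_online_state);
}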
1867
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001868#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
1869static ssize_t show_cpuhp_state(struct device *dev,
1870 struct device_attribute *attr, char *buf)
1871{
1872 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1873
1874 return sprintf(buf, "%d\n", st->state);
1875}
1876static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
1877
Thomas Gleixner757c9892016-02-26 18:43:32 +00001878static ssize_t write_cpuhp_target(struct device *dev,
1879 struct device_attribute *attr,
1880 const char *buf, size_t count)
1881{
1882 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1883 struct cpuhp_step *sp;
1884 int target, ret;
1885
1886 ret = kstrtoint(buf, 10, &target);
1887 if (ret)
1888 return ret;
1889
1890#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
1891 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
1892 return -EINVAL;
1893#else
1894 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
1895 return -EINVAL;
1896#endif
1897
1898 ret = lock_device_hotplug_sysfs();
1899 if (ret)
1900 return ret;
1901
1902 mutex_lock(&cpuhp_state_mutex);
1903 sp = cpuhp_get_step(target);
1904 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1905 mutex_unlock(&cpuhp_state_mutex);
1906 if (ret)
Sebastian Andrzej Siewior106c77e2017-06-02 16:27:14 +02001907 goto out;
Thomas Gleixner757c9892016-02-26 18:43:32 +00001908
1909 if (st->state < target)
1910 ret = do_cpu_up(dev->id, target);
1911 else
1912 ret = do_cpu_down(dev->id, target);
Sebastian Andrzej Siewior106c77e2017-06-02 16:27:14 +02001913out:
Thomas Gleixner757c9892016-02-26 18:43:32 +00001914 unlock_device_hotplug();
1915 return ret ? ret : count;
1916}
1917
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001918static ssize_t show_cpuhp_target(struct device *dev,
1919 struct device_attribute *attr, char *buf)
1920{
1921 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1922
1923 return sprintf(buf, "%d\n", st->target);
1924}
Thomas Gleixner757c9892016-02-26 18:43:32 +00001925static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001926
1927static struct attribute *cpuhp_cpu_attrs[] = {
1928 &dev_attr_state.attr,
1929 &dev_attr_target.attr,
1930 NULL
1931};
1932
1933static struct attribute_group cpuhp_cpu_attr_group = {
1934 .attrs = cpuhp_cpu_attrs,
1935 .name = "hotplug",
1936 NULL
1937};
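/*
 * Usage sketch: the group above is registered for every possible CPU in
 * cpuhp_sysfs_init() below, so the two attributes appear as
 * /sys/devices/system/cpu/cpuN/hotplug/{state,target}. Writing a state
 * number to "target" drives the CPU towards that state, e.g.
 * (illustrative shell, CPUHP_OFFLINE is 0):
 *
 *	# cat /sys/devices/system/cpu/cpu1/hotplug/state
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
 *
 * Without CONFIG_CPU_HOTPLUG_STATE_CONTROL only CPUHP_OFFLINE and
 * CPUHP_ONLINE are accepted, see write_cpuhp_target() above.
 */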
1938
1939static ssize_t show_cpuhp_states(struct device *dev,
1940 struct device_attribute *attr, char *buf)
1941{
1942 ssize_t cur, res = 0;
1943 int i;
1944
1945 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixner757c9892016-02-26 18:43:32 +00001946 for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001947 struct cpuhp_step *sp = cpuhp_get_step(i);
1948
1949 if (sp->name) {
1950 cur = sprintf(buf, "%3d: %s\n", i, sp->name);
1951 buf += cur;
1952 res += cur;
1953 }
1954 }
1955 mutex_unlock(&cpuhp_state_mutex);
1956 return res;
1957}
1958static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
1959
1960static struct attribute *cpuhp_cpu_root_attrs[] = {
1961 &dev_attr_states.attr,
1962 NULL
1963};
1964
1965static struct attribute_group cpuhp_cpu_root_attr_group = {
1966 .attrs = cpuhp_cpu_root_attrs,
1967 .name = "hotplug",
1968 NULL
1969};
1970
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02001971#ifdef CONFIG_HOTPLUG_SMT
1972
1973static const char *smt_states[] = {
1974 [CPU_SMT_ENABLED] = "on",
1975 [CPU_SMT_DISABLED] = "off",
1976 [CPU_SMT_FORCE_DISABLED] = "forceoff",
1977 [CPU_SMT_NOT_SUPPORTED] = "notsupported",
1978};
1979
1980static ssize_t
1981show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
1982{
1983 return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
1984}
1985
1986static void cpuhp_offline_cpu_device(unsigned int cpu)
1987{
1988 struct device *dev = get_cpu_device(cpu);
1989
1990 dev->offline = true;
1991 /* Tell user space about the state change */
1992 kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
1993}
1994
Thomas Gleixnere7cda2f2018-07-07 11:40:18 +02001995static void cpuhp_online_cpu_device(unsigned int cpu)
1996{
1997 struct device *dev = get_cpu_device(cpu);
1998
1999 dev->offline = false;
2000 /* Tell user space about the state change */
2001 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2002}
2003
Jiri Kosina5bdc5362019-05-30 00:09:39 +02002004int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02002005{
2006 int cpu, ret = 0;
2007
2008 cpu_maps_update_begin();
2009 for_each_online_cpu(cpu) {
2010 if (topology_is_primary_thread(cpu))
2011 continue;
2012 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2013 if (ret)
2014 break;
2015 /*
 2016		 * As this needs to hold the cpu maps lock, it's impossible
 2017		 * to call device_offline() because that ends up calling
 2018		 * cpu_down(), which takes the cpu maps lock. The cpu maps lock
 2019		 * needs to be held as this might race against in-kernel
 2020		 * abusers of the hotplug machinery (thermal management).
2021 *
2022 * So nothing would update device:offline state. That would
2023 * leave the sysfs entry stale and prevent onlining after
2024 * smt control has been changed to 'off' again. This is
2025 * called under the sysfs hotplug lock, so it is properly
2026 * serialized against the regular offline usage.
2027 */
2028 cpuhp_offline_cpu_device(cpu);
2029 }
Zhenzhong Duanf2a02af2019-01-17 02:10:59 -08002030 if (!ret)
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02002031 cpu_smt_control = ctrlval;
2032 cpu_maps_update_done();
2033 return ret;
2034}
2035
Jiri Kosina5bdc5362019-05-30 00:09:39 +02002036int cpuhp_smt_enable(void)
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02002037{
Thomas Gleixnere7cda2f2018-07-07 11:40:18 +02002038 int cpu, ret = 0;
2039
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02002040 cpu_maps_update_begin();
2041 cpu_smt_control = CPU_SMT_ENABLED;
Thomas Gleixnere7cda2f2018-07-07 11:40:18 +02002042 for_each_present_cpu(cpu) {
2043 /* Skip online CPUs and CPUs on offline nodes */
2044 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2045 continue;
2046 ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2047 if (ret)
2048 break;
2049 /* See comment in cpuhp_smt_disable() */
2050 cpuhp_online_cpu_device(cpu);
2051 }
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02002052 cpu_maps_update_done();
Thomas Gleixnere7cda2f2018-07-07 11:40:18 +02002053 return ret;
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02002054}
2055
2056static ssize_t
2057store_smt_control(struct device *dev, struct device_attribute *attr,
2058 const char *buf, size_t count)
2059{
2060 int ctrlval, ret;
2061
2062 if (sysfs_streq(buf, "on"))
2063 ctrlval = CPU_SMT_ENABLED;
2064 else if (sysfs_streq(buf, "off"))
2065 ctrlval = CPU_SMT_DISABLED;
2066 else if (sysfs_streq(buf, "forceoff"))
2067 ctrlval = CPU_SMT_FORCE_DISABLED;
2068 else
2069 return -EINVAL;
2070
2071 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2072 return -EPERM;
2073
2074 if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2075 return -ENODEV;
2076
2077 ret = lock_device_hotplug_sysfs();
2078 if (ret)
2079 return ret;
2080
2081 if (ctrlval != cpu_smt_control) {
2082 switch (ctrlval) {
2083 case CPU_SMT_ENABLED:
Thomas Gleixnere7cda2f2018-07-07 11:40:18 +02002084 ret = cpuhp_smt_enable();
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02002085 break;
2086 case CPU_SMT_DISABLED:
2087 case CPU_SMT_FORCE_DISABLED:
2088 ret = cpuhp_smt_disable(ctrlval);
2089 break;
2090 }
2091 }
2092
2093 unlock_device_hotplug();
2094 return ret ? ret : count;
2095}
2096static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
2097
2098static ssize_t
2099show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
2100{
2101 bool active = topology_max_smt_threads() > 1;
2102
2103 return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
2104}
2105static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
2106
2107static struct attribute *cpuhp_smt_attrs[] = {
2108 &dev_attr_control.attr,
2109 &dev_attr_active.attr,
2110 NULL
2111};
2112
2113static const struct attribute_group cpuhp_smt_attr_group = {
2114 .attrs = cpuhp_smt_attrs,
2115 .name = "smt",
2116 NULL
2117};
2118
2119static int __init cpu_smt_state_init(void)
2120{
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02002121 return sysfs_create_group(&cpu_subsys.dev_root->kobj,
2122 &cpuhp_smt_attr_group);
2123}
2124
2125#else
2126static inline int cpu_smt_state_init(void) { return 0; }
2127#endif
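/*
 * Usage sketch: when CONFIG_HOTPLUG_SMT is enabled the group above shows up
 * as /sys/devices/system/cpu/smt/{control,active}. Typical administration
 * from user space (illustrative shell):
 *
 *	# cat /sys/devices/system/cpu/smt/active
 *	# echo off      > /sys/devices/system/cpu/smt/control
 *	# echo forceoff > /sys/devices/system/cpu/smt/control
 *
 * "forceoff" is sticky: once set, store_smt_control() above rejects further
 * writes with -EPERM.
 */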
2128
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00002129static int __init cpuhp_sysfs_init(void)
2130{
2131 int cpu, ret;
2132
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02002133 ret = cpu_smt_state_init();
2134 if (ret)
2135 return ret;
2136
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00002137 ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2138 &cpuhp_cpu_root_attr_group);
2139 if (ret)
2140 return ret;
2141
2142 for_each_possible_cpu(cpu) {
2143 struct device *dev = get_cpu_device(cpu);
2144
2145 if (!dev)
2146 continue;
2147 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2148 if (ret)
2149 return ret;
2150 }
2151 return 0;
2152}
2153device_initcall(cpuhp_sysfs_init);
2154#endif
2155
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002156/*
2157 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 2158 * represents all NR_CPUS bit masks of the form 1<<nr.
2159 *
Rusty Russelle0b582e2009-01-01 10:12:28 +10302160 * It is used by cpumask_of() to get a constant address to a CPU
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002161 * mask value that has a single bit set only.
2162 */
Mike Travisb8d317d2008-07-24 18:21:29 -07002163
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002164/* cpu_bit_bitmap[0] is empty - so we can back into it */
Michael Rodriguez4d519852011-03-22 16:34:07 -07002165#define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002166#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2167#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2168#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
Mike Travisb8d317d2008-07-24 18:21:29 -07002169
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002170const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
Mike Travisb8d317d2008-07-24 18:21:29 -07002171
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002172 MASK_DECLARE_8(0), MASK_DECLARE_8(8),
2173 MASK_DECLARE_8(16), MASK_DECLARE_8(24),
2174#if BITS_PER_LONG > 32
2175 MASK_DECLARE_8(32), MASK_DECLARE_8(40),
2176 MASK_DECLARE_8(48), MASK_DECLARE_8(56),
Mike Travisb8d317d2008-07-24 18:21:29 -07002177#endif
2178};
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002179EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
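/*
 * Usage sketch: cpumask_of(cpu) resolves to a pointer into this table.
 * Roughly, get_cpu_mask() in <linux/cpumask.h> picks the row that has the
 * right bit set within a word and then backs the pointer up so that bit
 * ends up in the correct word of the mask:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * which is why row 0 is left empty and can be "backed into", as the comment
 * below notes.
 */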
Rusty Russell2d3854a2008-11-05 13:39:10 +11002180
2181const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2182EXPORT_SYMBOL(cpu_all_bits);
Rusty Russellb3199c02008-12-30 09:05:14 +10302183
2184#ifdef CONFIG_INIT_ALL_POSSIBLE
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002185struct cpumask __cpu_possible_mask __read_mostly
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08002186 = {CPU_BITS_ALL};
Rusty Russellb3199c02008-12-30 09:05:14 +10302187#else
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002188struct cpumask __cpu_possible_mask __read_mostly;
Rusty Russellb3199c02008-12-30 09:05:14 +10302189#endif
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002190EXPORT_SYMBOL(__cpu_possible_mask);
Rusty Russellb3199c02008-12-30 09:05:14 +10302191
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002192struct cpumask __cpu_online_mask __read_mostly;
2193EXPORT_SYMBOL(__cpu_online_mask);
Rusty Russellb3199c02008-12-30 09:05:14 +10302194
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002195struct cpumask __cpu_present_mask __read_mostly;
2196EXPORT_SYMBOL(__cpu_present_mask);
Rusty Russellb3199c02008-12-30 09:05:14 +10302197
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002198struct cpumask __cpu_active_mask __read_mostly;
2199EXPORT_SYMBOL(__cpu_active_mask);
Rusty Russell3fa41522008-12-30 09:05:16 +10302200
Rusty Russell3fa41522008-12-30 09:05:16 +10302201void init_cpu_present(const struct cpumask *src)
2202{
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08002203 cpumask_copy(&__cpu_present_mask, src);
Rusty Russell3fa41522008-12-30 09:05:16 +10302204}
2205
2206void init_cpu_possible(const struct cpumask *src)
2207{
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08002208 cpumask_copy(&__cpu_possible_mask, src);
Rusty Russell3fa41522008-12-30 09:05:16 +10302209}
2210
2211void init_cpu_online(const struct cpumask *src)
2212{
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08002213 cpumask_copy(&__cpu_online_mask, src);
Rusty Russell3fa41522008-12-30 09:05:16 +10302214}
Thomas Gleixnercff7d372016-02-26 18:43:28 +00002215
2216/*
2217 * Activate the first processor.
2218 */
2219void __init boot_cpu_init(void)
2220{
2221 int cpu = smp_processor_id();
2222
 2223	/* Mark the boot cpu "present", "online" etc. for the SMP and UP cases */
2224 set_cpu_online(cpu, true);
2225 set_cpu_active(cpu, true);
2226 set_cpu_present(cpu, true);
2227 set_cpu_possible(cpu, true);
2228}
2229
2230/*
2231 * Must be called _AFTER_ setting up the per_cpu areas
2232 */
Linus Torvalds6bb53ee2018-08-12 12:19:42 -07002233void __init boot_cpu_hotplug_init(void)
Thomas Gleixnercff7d372016-02-26 18:43:28 +00002234{
Abel Vesaaee08612018-08-15 00:26:00 +03002235#ifdef CONFIG_SMP
Thomas Gleixner8438e492018-06-29 16:05:48 +02002236 this_cpu_write(cpuhp_state.booted_once, true);
Abel Vesaaee08612018-08-15 00:26:00 +03002237#endif
Thomas Gleixner8438e492018-06-29 16:05:48 +02002238 this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
Thomas Gleixnercff7d372016-02-26 18:43:28 +00002239}
Josh Poimboeufedda9c32019-04-12 15:39:28 -05002240
Tyler Hickse2bd0772019-11-04 12:22:02 +01002241/*
2242 * These are used for a global "mitigations=" cmdline option for toggling
2243 * optional CPU mitigations.
2244 */
2245enum cpu_mitigations {
2246 CPU_MITIGATIONS_OFF,
2247 CPU_MITIGATIONS_AUTO,
2248 CPU_MITIGATIONS_AUTO_NOSMT,
2249};
2250
2251static enum cpu_mitigations cpu_mitigations __ro_after_init =
2252 CPU_MITIGATIONS_AUTO;
Josh Poimboeufedda9c32019-04-12 15:39:28 -05002253
2254static int __init mitigations_parse_cmdline(char *arg)
2255{
2256 if (!strcmp(arg, "off"))
2257 cpu_mitigations = CPU_MITIGATIONS_OFF;
2258 else if (!strcmp(arg, "auto"))
2259 cpu_mitigations = CPU_MITIGATIONS_AUTO;
2260 else if (!strcmp(arg, "auto,nosmt"))
2261 cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
Geert Uytterhoeven0cbb0ae2019-05-16 09:09:35 +02002262 else
2263 pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
2264 arg);
Josh Poimboeufedda9c32019-04-12 15:39:28 -05002265
2266 return 0;
2267}
2268early_param("mitigations", mitigations_parse_cmdline);
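/*
 * Usage sketch (illustrative): the option is parsed very early from the
 * kernel command line, e.g.
 *
 *	mitigations=off		-> cpu_mitigations_off() returns true
 *	mitigations=auto	-> the default behaviour
 *	mitigations=auto,nosmt	-> cpu_mitigations_auto_nosmt() returns true
 *
 * Architecture code queries the two helpers below when deciding whether to
 * enable optional vulnerability mitigations and whether to disable SMT.
 */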
Tyler Hickse2bd0772019-11-04 12:22:02 +01002269
2270/* mitigations=off */
2271bool cpu_mitigations_off(void)
2272{
2273 return cpu_mitigations == CPU_MITIGATIONS_OFF;
2274}
2275EXPORT_SYMBOL_GPL(cpu_mitigations_off);
2276
2277/* mitigations=auto,nosmt */
2278bool cpu_mitigations_auto_nosmt(void)
2279{
2280 return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
2281}
2282EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);