/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done:	Signal completion to the issuer of the task
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	bool			booted_once;
	struct hlist_node	*node;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lock_class_key cpuhp_state_key;
static struct lockdep_map cpuhp_state_lock_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
#endif

/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			skip_onerr;
	bool			cant_stop;
	bool			multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];

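/*
 * Illustrative only: subsystems normally attach their callbacks to these
 * BP/AP step tables through the cpuhp_setup_state() family of helpers
 * declared in <linux/cpuhotplug.h>, e.g.
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys:online",
 *				subsys_online_cpu, subsys_offline_cpu);
 *
 * "subsys:online", subsys_online_cpu() and subsys_offline_cpu() are
 * made-up placeholder names for a caller's own state name and callbacks.
 */
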
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The step in the state machine
 * @bringup:	True if the bringup callback should be invoked
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (!step->multi_instance) {
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret)
			goto err;
		cnt++;
	}
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;
		cbm(cpu, node);
	}
	return ret;
}

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)


void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();

}
EXPORT_SYMBOL_GPL(put_online_cpus);

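/*
 * Illustrative reader-side usage sketch (do_per_cpu_work() is a placeholder
 * for a caller's own function): holding the reference keeps cpu_online_mask
 * stable against a concurrent hotplug writer.
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_per_cpu_work(cpu);
 *	put_online_cpus();
 */
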
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an api which is called all that often.
 *
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif	/* CONFIG_HOTPLUG_CPU */

/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
EXPORT_SYMBOL_GPL(cpu_smt_control);

static bool cpu_smt_available __read_mostly;

void __init cpu_smt_disable(bool force)
{
	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return;

	if (force) {
		pr_info("SMT: Force disabled\n");
		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
	} else {
		pr_info("SMT: disabled\n");
		cpu_smt_control = CPU_SMT_DISABLED;
	}
}

/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code before non boot CPUs
 * are brought up.
 */
void __init cpu_smt_check_topology_early(void)
{
	if (!topology_smt_supported())
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

/*
 * If SMT was disabled by BIOS, detect it here, after the CPUs have been
 * brought online. This ensures the smt/l1tf sysfs entries are consistent
 * with reality. cpu_smt_available is set to true during the bringup of non
 * boot CPUs when a SMT sibling is detected. Note, this may overwrite
 * cpu_smt_control's previous setting.
 */
void __init cpu_smt_check_topology(void)
{
	if (!cpu_smt_available)
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

static int __init smt_cmdline_disable(char *str)
{
	cpu_smt_disable(str && !strcmp(str, "force"));
	return 0;
}
early_param("nosmt", smt_cmdline_disable);
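
/*
 * Illustrative command line usage: booting with "nosmt" soft-disables SMT,
 * keeping sibling threads offline while typically allowing them to be
 * re-enabled later through the SMT control sysfs interface, whereas
 * "nosmt=force" disables SMT irrevocably for this boot.
 */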

static inline bool cpu_smt_allowed(unsigned int cpu)
{
	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * If the CPU is not a 'primary' thread and the booted_once bit is
	 * set then the processor has SMT support. Store this information
	 * for the late check of SMT support in cpu_smt_check_topology().
	 */
	if (per_cpu(cpuhp_state, cpu).booted_once)
		cpu_smt_available = true;

	if (cpu_smt_control == CPU_SMT_ENABLED)
		return true;

	/*
	 * On x86 it's required to boot all logical CPUs at least once so
	 * that the init code can get a chance to set CR4.MCE on each
	 * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
	 * core will shut down the machine.
	 */
	return !per_cpu(cpuhp_state, cpu).booted_once;
}
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
			int *nr_calls)
{
	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
	void *hcpu = (void *)(long)cpu;

	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, unsigned int cpu)
{
	return __cpu_notify(val, cpu, -1, NULL);
}

static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
	BUG_ON(cpu_notify(val, cpu));
}

/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
	int nr_calls = 0;
	int ret;

	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
	}
	return ret;
}

static int notify_online(unsigned int cpu)
{
	cpu_notify(CPU_ONLINE, cpu);
	return 0;
}

static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);

static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_completion(&st->done);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the hotplug thread of the target cpu */
	kthread_unpark(st->thread);

	/*
	 * SMT soft disabling on X86 requires to bring the CPU out of the
	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
	 * CPU marked itself as booted_once in notify_cpu_starting() so the
	 * cpu_smt_allowed() check will now return false if this is not the
	 * primary sibling.
	 */
	if (!cpu_smt_allowed(cpu))
		return -ECANCELED;

	/* Should we go further up ? */
	if (st->target > CPUHP_AP_ONLINE_IDLE) {
		__cpuhp_kick_ap_work(st);
		wait_for_completion(&st->done);
	}
	return st->result;
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret) {
		cpu_notify(CPU_UP_CANCELED, cpu);
		return ret;
	}
	return bringup_wait_for_ap(cpu);
}

/*
 * Hotplug state machine related functions
 */
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, true, NULL);
	}
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, false, NULL);
	}
}

static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
{
	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return true;
	/*
	 * When CPU hotplug is disabled, then taking the CPU down is not
	 * possible because takedown_cpu() and the architecture and
	 * subsystem specific mechanisms are not available. So the CPU
	 * which would be completely unplugged again needs to stay around
	 * in the current state.
	 */
	return st->state <= CPUHP_BRINGUP_CPU;
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
		if (ret) {
			if (can_rollback_cpu(st)) {
				st->target = prev_state;
				undo_cpu_up(cpu, st);
			}
			break;
		}
	}
	return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);

	return cpuhp_down_callbacks(cpu, st, target);
}

/* Execute the online startup callbacks. Used to be CPU_ONLINE */
static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	return cpuhp_up_callbacks(cpu, st, st->target);
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	int ret = 0;

	/*
	 * Paired with the mb() in cpuhp_kick_ap_work and
	 * cpuhp_invoke_ap_callback, so the work set is consistently visible.
	 */
	smp_mb();
	if (!st->should_run)
		return;

	st->should_run = false;

	lock_map_acquire(&cpuhp_state_lock_map);
	/* Single callback invocation for [un]install ? */
	if (st->single) {
		if (st->cb_state < CPUHP_AP_ONLINE) {
			local_irq_disable();
			ret = cpuhp_invoke_callback(cpu, st->cb_state,
						    st->bringup, st->node);
			local_irq_enable();
		} else {
			ret = cpuhp_invoke_callback(cpu, st->cb_state,
						    st->bringup, st->node);
		}
	} else if (st->rollback) {
		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

		undo_cpu_down(cpu, st);
		/*
		 * This is a momentary workaround to keep the notifier users
		 * happy. Will go away once we got rid of the notifiers.
		 */
		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
		st->rollback = false;
	} else {
		/* Cannot happen .... */
		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

		/* Regular hotplug work */
		if (st->state < st->target)
			ret = cpuhp_ap_online(cpu, st);
		else if (st->state > st->target)
			ret = cpuhp_ap_offline(cpu, st);
	}
	lock_map_release(&cpuhp_state_lock_map);
	st->result = ret;
	complete(&st->done);
}

/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	if (!cpu_online(cpu))
		return 0;

	lock_map_acquire(&cpuhp_state_lock_map);
	lock_map_release(&cpuhp_state_lock_map);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node);

	st->cb_state = state;
	st->single = true;
	st->bringup = bringup;
	st->node = node;

	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_completion(&st->done);
	return st->result;
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
{
	st->result = 0;
	st->single = false;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state state = st->state;

	trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
	lock_map_acquire(&cpuhp_state_lock_map);
	lock_map_release(&cpuhp_state_lock_map);
	__cpuhp_kick_ap_work(st);
	wait_for_completion(&st->done);
	trace_cpuhp_exit(cpu, st->state, state, st->result);
	return st->result;
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a task
		 * which was running on this cpu in the past and has just
		 * been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}

static int notify_down_prepare(unsigned int cpu)
{
	int err, nr_calls = 0;

	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
	}
	return err;
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--)
		cpuhp_invoke_callback(cpu, st->state, false, NULL);

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_completion(&st->done);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}

static int notify_dead(unsigned int cpu)
{
	cpu_notify_nofail(CPU_DEAD, cpu);
	check_for_tasks(cpu);
	return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete(&st->done);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}

#else
#define notify_down_prepare	NULL
#define takedown_cpu		NULL
#define notify_dead		NULL
#endif

#ifdef CONFIG_HOTPLUG_CPU

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;
	bool hasdied = false;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		st->target = prev_state;
		st->rollback = true;
		cpuhp_kick_ap_work(cpu);
	}

	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
out:
	cpu_hotplug_done();
	/* This post dead nonsense must die */
	if (!ret && hasdied)
		cpu_notify_nofail(CPU_POST_DEAD, cpu);
	arch_smt_update();
	return ret;
}

static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
	if (cpu_hotplug_disabled)
		return -EBUSY;
	return _cpu_down(cpu, 0, target);
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();
	err = cpu_down_maps_locked(cpu, target);
	cpu_maps_update_done();
	return err;
}
int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	st->booted_once = true;
	while (st->state < target) {
		st->state++;
		cpuhp_invoke_callback(cpu, st->state, true, NULL);
	}
}

/*
 * Called from the idle task. Wake up the controlling task which brings the
 * hotplug thread of the upcoming CPU up and then delegates the rest of the
 * online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	/*
	 * Unpark the stopper thread before we start the idle loop (and start
	 * scheduling); this ensures the stopper task is always available.
	 */
1132 stop_machine_unpark(smp_processor_id());
1133
Thomas Gleixner8df3e072016-02-26 18:43:41 +00001134 st->state = CPUHP_AP_ONLINE_IDLE;
Thomas Gleixner7b4e4b12017-07-04 22:20:23 +02001135 complete(&st->done);
Thomas Gleixner949338e2016-02-26 18:43:35 +00001136}
1137
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001138/* Requires cpu_add_remove_lock to be held */
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001139static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001140{
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001141 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
Suresh Siddha3bb5d2e2012-04-20 17:08:50 -07001142 struct task_struct *idle;
Thomas Gleixner2e1a3482016-02-26 18:43:37 +00001143 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001145 cpu_hotplug_begin();
Thomas Gleixner38498a62012-04-20 13:05:44 +00001146
Thomas Gleixner757c9892016-02-26 18:43:32 +00001147 if (!cpu_present(cpu)) {
Yasuaki Ishimatsu5e5041f2012-10-23 01:30:54 +02001148 ret = -EINVAL;
1149 goto out;
1150 }
1151
Thomas Gleixner757c9892016-02-26 18:43:32 +00001152 /*
1153 * The caller of do_cpu_up might have raced with another
1154 * caller. Ignore it for now.
1155 */
1156 if (st->state >= target)
Thomas Gleixner38498a62012-04-20 13:05:44 +00001157 goto out;
Thomas Gleixner757c9892016-02-26 18:43:32 +00001158
1159 if (st->state == CPUHP_OFFLINE) {
1160 /* Let it fail before we try to bring the cpu up */
1161 idle = idle_thread_get(cpu);
1162 if (IS_ERR(idle)) {
1163 ret = PTR_ERR(idle);
1164 goto out;
1165 }
Suresh Siddha3bb5d2e2012-04-20 17:08:50 -07001166 }
Thomas Gleixner38498a62012-04-20 13:05:44 +00001167
Thomas Gleixnerba997462016-02-26 18:43:24 +00001168 cpuhp_tasks_frozen = tasks_frozen;
1169
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001170 st->target = target;
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001171 /*
1172 * If the current CPU state is in the range of the AP hotplug thread,
1173 * then we need to kick the thread once more.
1174 */
Thomas Gleixner8df3e072016-02-26 18:43:41 +00001175 if (st->state > CPUHP_BRINGUP_CPU) {
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001176 ret = cpuhp_kick_ap_work(cpu);
1177 /*
1178 * The AP side has done the error rollback already. Just
1179 * return the error code..
1180 */
1181 if (ret)
1182 goto out;
1183 }
1184
1185 /*
1186 * Try to reach the target state. We max out on the BP at
Thomas Gleixner8df3e072016-02-26 18:43:41 +00001187 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001188 * responsible for bringing it up to the target state.
1189 */
Thomas Gleixner8df3e072016-02-26 18:43:41 +00001190 target = min((int)target, CPUHP_BRINGUP_CPU);
Thomas Gleixnera7246322016-08-12 19:49:38 +02001191 ret = cpuhp_up_callbacks(cpu, st, target);
Thomas Gleixner38498a62012-04-20 13:05:44 +00001192out:
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001193 cpu_hotplug_done();
Thomas Gleixnera3c901b2018-11-25 19:33:39 +01001194 arch_smt_update();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195 return ret;
1196}
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001197
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001198static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001199{
1200 int err = 0;
minskey guocf234222010-05-24 14:32:41 -07001201
Rusty Russelle0b582e2009-01-01 10:12:28 +10301202 if (!cpu_possible(cpu)) {
Fabian Frederick84117da2014-06-04 16:11:17 -07001203 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1204 cpu);
Chen Gong87d5e022010-03-05 13:42:38 -08001205#if defined(CONFIG_IA64)
Fabian Frederick84117da2014-06-04 16:11:17 -07001206 pr_err("please check additional_cpus= boot parameter\n");
KAMEZAWA Hiroyuki73e753a2007-10-18 23:40:47 -07001207#endif
1208 return -EINVAL;
1209 }
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001210
Toshi Kani01b0f192013-11-12 15:07:25 -08001211 err = try_online_node(cpu_to_node(cpu));
1212 if (err)
1213 return err;
minskey guocf234222010-05-24 14:32:41 -07001214
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001215 cpu_maps_update_begin();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001216
Max Krasnyanskye761b772008-07-15 04:43:49 -07001217 if (cpu_hotplug_disabled) {
1218 err = -EBUSY;
1219 goto out;
1220 }
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02001221 if (!cpu_smt_allowed(cpu)) {
1222 err = -EPERM;
1223 goto out;
1224 }
Max Krasnyanskye761b772008-07-15 04:43:49 -07001225
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001226 err = _cpu_up(cpu, 0, target);
Max Krasnyanskye761b772008-07-15 04:43:49 -07001227out:
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001228 cpu_maps_update_done();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001229 return err;
1230}
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001231
1232int cpu_up(unsigned int cpu)
1233{
1234 return do_cpu_up(cpu, CPUHP_ONLINE);
1235}
Paul E. McKenneya513f6b2011-12-11 21:54:45 -08001236EXPORT_SYMBOL_GPL(cpu_up);
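
/*
 * Illustrative sketch, not part of this file: a kernel-internal caller can
 * online a possible-but-offline CPU through cpu_up().  Failures map to the
 * -EINVAL/-EBUSY/-EPERM paths handled in do_cpu_up() above.  The function
 * name "example_bring_cpu_online" is hypothetical.
 *
 *	#include <linux/cpu.h>
 *
 *	static int example_bring_cpu_online(unsigned int cpu)
 *	{
 *		int ret;
 *
 *		if (cpu_online(cpu))
 *			return 0;
 *
 *		ret = cpu_up(cpu);
 *		if (ret)
 *			pr_err("example: failed to online CPU%u: %d\n", cpu, ret);
 *		return ret;
 *	}
 */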
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001237
Rafael J. Wysockif3de4be2007-08-30 23:56:29 -07001238#ifdef CONFIG_PM_SLEEP_SMP
Rusty Russelle0b582e2009-01-01 10:12:28 +10301239static cpumask_var_t frozen_cpus;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001240
James Morsed391e552016-08-17 13:50:25 +01001241int freeze_secondary_cpus(int primary)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001242{
James Morsed391e552016-08-17 13:50:25 +01001243 int cpu, error = 0;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001244
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001245 cpu_maps_update_begin();
James Morsed391e552016-08-17 13:50:25 +01001246 if (!cpu_online(primary))
1247 primary = cpumask_first(cpu_online_mask);
Xiaotian Feng9ee349a2009-12-16 18:04:32 +01001248 /*
1249 * We take down all of the non-boot CPUs in one shot to avoid races
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001250	 * with userspace trying to use CPU hotplug at the same time.
1251 */
Rusty Russelle0b582e2009-01-01 10:12:28 +10301252 cpumask_clear(frozen_cpus);
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01001253
Fabian Frederick84117da2014-06-04 16:11:17 -07001254 pr_info("Disabling non-boot CPUs ...\n");
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001255 for_each_online_cpu(cpu) {
James Morsed391e552016-08-17 13:50:25 +01001256 if (cpu == primary)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001257 continue;
Todd E Brandtbb3632c2014-06-06 05:40:17 -07001258 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001259 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
Todd E Brandtbb3632c2014-06-06 05:40:17 -07001260 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
Mike Travisfeae3202009-11-17 18:22:13 -06001261 if (!error)
Rusty Russelle0b582e2009-01-01 10:12:28 +10301262 cpumask_set_cpu(cpu, frozen_cpus);
Mike Travisfeae3202009-11-17 18:22:13 -06001263 else {
Fabian Frederick84117da2014-06-04 16:11:17 -07001264 pr_err("Error taking CPU%d down: %d\n", cpu, error);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001265 break;
1266 }
1267 }
Joseph Cihula86886e52009-06-30 19:31:07 -07001268
Vitaly Kuznetsov89af7ba2015-08-05 00:52:46 -07001269 if (!error)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001270 BUG_ON(num_online_cpus() > 1);
Vitaly Kuznetsov89af7ba2015-08-05 00:52:46 -07001271 else
Fabian Frederick84117da2014-06-04 16:11:17 -07001272 pr_err("Non-boot CPUs are not disabled\n");
Vitaly Kuznetsov89af7ba2015-08-05 00:52:46 -07001273
1274 /*
1275 * Make sure the CPUs won't be enabled by someone else. We need to do
1276 * this even in case of failure as all disable_nonboot_cpus() users are
1277 * supposed to do enable_nonboot_cpus() on the failure path.
1278 */
1279 cpu_hotplug_disabled++;
1280
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001281 cpu_maps_update_done();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001282 return error;
1283}
1284
Suresh Siddhad0af9ee2009-08-19 18:05:36 -07001285void __weak arch_enable_nonboot_cpus_begin(void)
1286{
1287}
1288
1289void __weak arch_enable_nonboot_cpus_end(void)
1290{
1291}
1292
Mathias Krause71cf5ae2015-07-19 20:06:22 +02001293void enable_nonboot_cpus(void)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001294{
1295 int cpu, error;
Thierry Strudel49d0e062016-06-14 17:46:44 -07001296 struct device *cpu_device;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001297
1298 /* Allow everyone to use the CPU hotplug again */
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001299 cpu_maps_update_begin();
Lianwei Wang01b41152016-06-09 23:43:28 -07001300 __cpu_hotplug_enable();
Rusty Russelle0b582e2009-01-01 10:12:28 +10301301 if (cpumask_empty(frozen_cpus))
Rafael J. Wysocki1d64b9c2007-04-01 23:49:49 -07001302 goto out;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001303
Fabian Frederick84117da2014-06-04 16:11:17 -07001304 pr_info("Enabling non-boot CPUs ...\n");
Suresh Siddhad0af9ee2009-08-19 18:05:36 -07001305
1306 arch_enable_nonboot_cpus_begin();
1307
Rusty Russelle0b582e2009-01-01 10:12:28 +10301308 for_each_cpu(cpu, frozen_cpus) {
Todd E Brandtbb3632c2014-06-06 05:40:17 -07001309 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001310 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
Todd E Brandtbb3632c2014-06-06 05:40:17 -07001311 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001312 if (!error) {
Fabian Frederick84117da2014-06-04 16:11:17 -07001313 pr_info("CPU%d is up\n", cpu);
Thierry Strudel49d0e062016-06-14 17:46:44 -07001314 cpu_device = get_cpu_device(cpu);
1315 if (!cpu_device)
1316 pr_err("%s: failed to get cpu%d device\n",
1317 __func__, cpu);
1318 else
1319 kobject_uevent(&cpu_device->kobj, KOBJ_ONLINE);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001320 continue;
1321 }
Fabian Frederick84117da2014-06-04 16:11:17 -07001322 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001323 }
Suresh Siddhad0af9ee2009-08-19 18:05:36 -07001324
1325 arch_enable_nonboot_cpus_end();
1326
Rusty Russelle0b582e2009-01-01 10:12:28 +10301327 cpumask_clear(frozen_cpus);
Rafael J. Wysocki1d64b9c2007-04-01 23:49:49 -07001328out:
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001329 cpu_maps_update_done();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001330}
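
/*
 * Illustrative sketch, not part of this file: the suspend/hibernate core is
 * the intended user of the two helpers above.  disable_nonboot_cpus() (the
 * <linux/cpu.h> wrapper around freeze_secondary_cpus(0)) must be paired with
 * enable_nonboot_cpus() even on failure, matching the cpu_hotplug_disabled++
 * comment above.  example_do_sleep() stands in for the real platform sleep
 * entry and is hypothetical.
 *
 *	static int example_enter_suspend(void)
 *	{
 *		int error;
 *
 *		error = disable_nonboot_cpus();
 *		if (error)
 *			return error;
 *
 *		error = example_do_sleep();
 *
 *		enable_nonboot_cpus();
 *		return error;
 *	}
 */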
Rusty Russelle0b582e2009-01-01 10:12:28 +10301331
Fenghua Yud7268a32011-11-15 21:59:31 +01001332static int __init alloc_frozen_cpus(void)
Rusty Russelle0b582e2009-01-01 10:12:28 +10301333{
1334 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1335 return -ENOMEM;
1336 return 0;
1337}
1338core_initcall(alloc_frozen_cpus);
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001339
1340/*
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001341 * When callbacks for CPU hotplug notifications are being executed, we must
1342 * ensure that the state of the system with respect to the tasks being frozen
1343 * or not, as reported by the notification, remains unchanged *throughout the
1344 * duration* of the execution of the callbacks.
1345 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1346 *
1347 * This synchronization is implemented by mutually excluding regular CPU
1348 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1349 * Hibernate notifications.
1350 */
1351static int
1352cpu_hotplug_pm_callback(struct notifier_block *nb,
1353 unsigned long action, void *ptr)
1354{
1355 switch (action) {
1356
1357 case PM_SUSPEND_PREPARE:
1358 case PM_HIBERNATION_PREPARE:
Srivatsa S. Bhat16e53db2013-06-12 14:04:36 -07001359 cpu_hotplug_disable();
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001360 break;
1361
1362 case PM_POST_SUSPEND:
1363 case PM_POST_HIBERNATION:
Srivatsa S. Bhat16e53db2013-06-12 14:04:36 -07001364 cpu_hotplug_enable();
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001365 break;
1366
1367 default:
1368 return NOTIFY_DONE;
1369 }
1370
1371 return NOTIFY_OK;
1372}
1373
1374
Fenghua Yud7268a32011-11-15 21:59:31 +01001375static int __init cpu_hotplug_pm_sync_init(void)
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001376{
Fenghua Yu6e32d472012-11-13 11:32:43 -08001377 /*
1378 * cpu_hotplug_pm_callback has higher priority than x86
1379 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
1380	 * to disable cpu hotplug and thus avoid a cpu hotplug race.
1381 */
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001382 pm_notifier(cpu_hotplug_pm_callback, 0);
1383 return 0;
1384}
1385core_initcall(cpu_hotplug_pm_sync_init);
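
/*
 * Illustrative sketch, not part of this file: code outside the PM path can
 * use the same pair of helpers the callback above relies on to keep the CPU
 * topology stable over a critical section.  The function name below is
 * hypothetical.
 *
 *	static void example_run_with_hotplug_blocked(void (*fn)(void))
 *	{
 *		cpu_hotplug_disable();
 *		fn();
 *		cpu_hotplug_enable();
 *	}
 */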
1386
Rafael J. Wysockif3de4be2007-08-30 23:56:29 -07001387#endif /* CONFIG_PM_SLEEP_SMP */
Max Krasnyansky68f4f1e2008-05-29 11:17:02 -07001388
1389#endif /* CONFIG_SMP */
Mike Travisb8d317d2008-07-24 18:21:29 -07001390
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001391/* Boot processor state steps */
1392static struct cpuhp_step cpuhp_bp_states[] = {
1393 [CPUHP_OFFLINE] = {
1394 .name = "offline",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001395 .startup.single = NULL,
1396 .teardown.single = NULL,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001397 },
1398#ifdef CONFIG_SMP
1399	[CPUHP_CREATE_THREADS] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001400 .name = "threads:prepare",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001401 .startup.single = smpboot_create_threads,
1402 .teardown.single = NULL,
Thomas Gleixner757c9892016-02-26 18:43:32 +00001403 .cant_stop = true,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001404 },
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001405 [CPUHP_PERF_PREPARE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001406 .name = "perf:prepare",
1407 .startup.single = perf_event_init_cpu,
1408 .teardown.single = perf_event_exit_cpu,
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001409 },
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001410 [CPUHP_WORKQUEUE_PREP] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001411 .name = "workqueue:prepare",
1412 .startup.single = workqueue_prepare_cpu,
1413 .teardown.single = NULL,
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001414 },
Thomas Gleixner27590dc2016-07-15 10:41:04 +02001415 [CPUHP_HRTIMERS_PREPARE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001416 .name = "hrtimers:prepare",
1417 .startup.single = hrtimers_prepare_cpu,
1418 .teardown.single = hrtimers_dead_cpu,
Thomas Gleixner27590dc2016-07-15 10:41:04 +02001419 },
Richard Weinberger31487f82016-07-13 17:17:01 +00001420 [CPUHP_SMPCFD_PREPARE] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001421 .name = "smpcfd:prepare",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001422 .startup.single = smpcfd_prepare_cpu,
1423 .teardown.single = smpcfd_dead_cpu,
Richard Weinberger31487f82016-07-13 17:17:01 +00001424 },
Richard Weinbergere6d49892016-08-18 14:57:17 +02001425 [CPUHP_RELAY_PREPARE] = {
1426 .name = "relay:prepare",
1427 .startup.single = relay_prepare_cpu,
1428 .teardown.single = NULL,
1429 },
Sebastian Andrzej Siewior6731d4f2016-08-23 14:53:19 +02001430 [CPUHP_SLAB_PREPARE] = {
1431 .name = "slab:prepare",
1432 .startup.single = slab_prepare_cpu,
1433 .teardown.single = slab_dead_cpu,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001434 },
Thomas Gleixner4df83742016-07-13 17:17:03 +00001435 [CPUHP_RCUTREE_PREP] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001436 .name = "RCU/tree:prepare",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001437 .startup.single = rcutree_prepare_cpu,
1438 .teardown.single = rcutree_dead_cpu,
Thomas Gleixner4df83742016-07-13 17:17:03 +00001439 },
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001440 /*
1441 * Preparatory and dead notifiers. Will be replaced once the notifiers
1442 * are converted to states.
1443 */
1444 [CPUHP_NOTIFY_PREPARE] = {
1445 .name = "notify:prepare",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001446 .startup.single = notify_prepare,
1447 .teardown.single = notify_dead,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001448 .skip_onerr = true,
Thomas Gleixner757c9892016-02-26 18:43:32 +00001449 .cant_stop = true,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001450 },
Richard Cochran4fae16d2016-07-27 11:08:18 +02001451 /*
1452 * On the tear-down path, timers_dead_cpu() must be invoked
1453 * before blk_mq_queue_reinit_notify() from notify_dead(),
1454	 * otherwise an RCU stall occurs.
1455 */
Thomas Gleixner249d4a92017-12-27 21:37:25 +01001456 [CPUHP_TIMERS_PREPARE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001457 .name = "timers:dead",
Thomas Gleixner249d4a92017-12-27 21:37:25 +01001458 .startup.single = timers_prepare_cpu,
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001459 .teardown.single = timers_dead_cpu,
Richard Cochran4fae16d2016-07-27 11:08:18 +02001460 },
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001461 /* Kicks the plugged cpu into life */
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001462 [CPUHP_BRINGUP_CPU] = {
1463 .name = "cpu:bringup",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001464 .startup.single = bringup_cpu,
1465 .teardown.single = NULL,
Thomas Gleixner757c9892016-02-26 18:43:32 +00001466 .cant_stop = true,
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001467 },
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001468 /*
1469 * Handled on controll processor until the plugged processor manages
1470 * this itself.
1471 */
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001472 [CPUHP_TEARDOWN_CPU] = {
1473 .name = "cpu:teardown",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001474 .startup.single = NULL,
1475 .teardown.single = takedown_cpu,
Thomas Gleixner757c9892016-02-26 18:43:32 +00001476 .cant_stop = true,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001477 },
Thomas Gleixnera7c734142016-07-12 21:59:23 +02001478#else
1479 [CPUHP_BRINGUP_CPU] = { },
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001480#endif
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001481};
1482
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001483/* Application processor state steps */
1484static struct cpuhp_step cpuhp_ap_states[] = {
1485#ifdef CONFIG_SMP
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001486 /* Final state before CPU kills itself */
1487 [CPUHP_AP_IDLE_DEAD] = {
1488 .name = "idle:dead",
1489 },
1490 /*
1491 * Last state before CPU enters the idle loop to die. Transient state
1492 * for synchronization.
1493 */
1494 [CPUHP_AP_OFFLINE] = {
1495 .name = "ap:offline",
1496 .cant_stop = true,
1497 },
Thomas Gleixner9cf72432016-03-10 12:54:09 +01001498 /* First state is scheduler control. Interrupts are disabled */
1499 [CPUHP_AP_SCHED_STARTING] = {
1500 .name = "sched:starting",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001501 .startup.single = sched_cpu_starting,
1502 .teardown.single = sched_cpu_dying,
Thomas Gleixner9cf72432016-03-10 12:54:09 +01001503 },
Thomas Gleixner4df83742016-07-13 17:17:03 +00001504 [CPUHP_AP_RCUTREE_DYING] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001505 .name = "RCU/tree:dying",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001506 .startup.single = NULL,
1507 .teardown.single = rcutree_dying_cpu,
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001508 },
Lai Jiangshanff3d4fd2017-11-28 21:19:53 +08001509 [CPUHP_AP_SMPCFD_DYING] = {
1510 .name = "smpcfd:dying",
1511 .startup.single = NULL,
1512 .teardown.single = smpcfd_dying_cpu,
1513 },
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001514 /* Entry state on starting. Interrupts enabled from here on. Transient
1515	 * state for synchronization */
1516 [CPUHP_AP_ONLINE] = {
1517 .name = "ap:online",
1518 },
1519 /* Handle smpboot threads park/unpark */
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001520 [CPUHP_AP_SMPBOOT_THREADS] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001521 .name = "smpboot/threads:online",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001522 .startup.single = smpboot_unpark_threads,
Thomas Gleixner93335752018-05-29 19:05:25 +02001523 .teardown.single = smpboot_park_threads,
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001524 },
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001525 [CPUHP_AP_PERF_ONLINE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001526 .name = "perf:online",
1527 .startup.single = perf_event_init_cpu,
1528 .teardown.single = perf_event_exit_cpu,
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001529 },
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001530 [CPUHP_AP_WORKQUEUE_ONLINE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001531 .name = "workqueue:online",
1532 .startup.single = workqueue_online_cpu,
1533 .teardown.single = workqueue_offline_cpu,
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001534 },
Thomas Gleixner4df83742016-07-13 17:17:03 +00001535 [CPUHP_AP_RCUTREE_ONLINE] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001536 .name = "RCU/tree:online",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001537 .startup.single = rcutree_online_cpu,
1538 .teardown.single = rcutree_offline_cpu,
Thomas Gleixner4df83742016-07-13 17:17:03 +00001539 },
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001540
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001541 /*
1542 * Online/down_prepare notifiers. Will be removed once the notifiers
1543 * are converted to states.
1544 */
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001545 [CPUHP_AP_NOTIFY_ONLINE] = {
1546 .name = "notify:online",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001547 .startup.single = notify_online,
1548 .teardown.single = notify_down_prepare,
Sebastian Andrzej Siewior3b9d6da2016-04-08 14:40:15 +02001549 .skip_onerr = true,
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001550 },
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001551#endif
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001552 /*
1553 * The dynamically registered state space is here
1554 */
1555
Thomas Gleixneraaddd7d2016-03-10 12:54:19 +01001556#ifdef CONFIG_SMP
1557 /* Last state is scheduler control setting the cpu active */
1558 [CPUHP_AP_ACTIVE] = {
1559 .name = "sched:active",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001560 .startup.single = sched_cpu_activate,
1561 .teardown.single = sched_cpu_deactivate,
Thomas Gleixneraaddd7d2016-03-10 12:54:19 +01001562 },
1563#endif
1564
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001565 /* CPU is fully up and running. */
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001566 [CPUHP_ONLINE] = {
1567 .name = "online",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001568 .startup.single = NULL,
1569 .teardown.single = NULL,
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001570 },
1571};
1572
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001573/* Sanity check for callbacks */
1574static int cpuhp_cb_check(enum cpuhp_state state)
1575{
1576 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1577 return -EINVAL;
1578 return 0;
1579}
1580
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001581static void cpuhp_store_callbacks(enum cpuhp_state state,
1582 const char *name,
1583 int (*startup)(unsigned int cpu),
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001584 int (*teardown)(unsigned int cpu),
1585 bool multi_instance)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001586{
1587 /* (Un)Install the callbacks for further cpu hotplug operations */
1588 struct cpuhp_step *sp;
1589
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001590 sp = cpuhp_get_step(state);
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001591 sp->startup.single = startup;
1592 sp->teardown.single = teardown;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001593 sp->name = name;
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001594 sp->multi_instance = multi_instance;
1595 INIT_HLIST_HEAD(&sp->list);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001596}
1597
1598static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1599{
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001600 return cpuhp_get_step(state)->teardown.single;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001601}
1602
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001603/*
1604 * Call the startup/teardown function for a step either on the AP or
1605 * on the current CPU.
1606 */
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001607static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1608 struct hlist_node *node)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001609{
Thomas Gleixnera7246322016-08-12 19:49:38 +02001610 struct cpuhp_step *sp = cpuhp_get_step(state);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001611 int ret;
1612
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001613 if ((bringup && !sp->startup.single) ||
1614 (!bringup && !sp->teardown.single))
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001615 return 0;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001616 /*
1617 * The non AP bound callbacks can fail on bringup. On teardown
1618 * e.g. module removal we crash for now.
1619 */
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001620#ifdef CONFIG_SMP
1621 if (cpuhp_is_ap_state(state))
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001622 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001623 else
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001624 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001625#else
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001626 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001627#endif
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001628 BUG_ON(ret && !bringup);
1629 return ret;
1630}
1631
1632/*
1633 * Called from __cpuhp_setup_state on a recoverable failure.
1634 *
1635 * Note: The teardown callbacks for rollback are not allowed to fail!
1636 */
1637static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001638 struct hlist_node *node)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001639{
1640 int cpu;
1641
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001642 /* Roll back the already executed steps on the other cpus */
1643 for_each_present_cpu(cpu) {
1644 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1645 int cpustate = st->state;
1646
1647 if (cpu >= failedcpu)
1648 break;
1649
1650 /* Did we invoke the startup call on that cpu ? */
1651 if (cpustate >= state)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001652 cpuhp_issue_call(cpu, state, false, node);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001653 }
1654}
1655
1656/*
1657	 * Returns a free slot for dynamic state assignment in the online range. The states
1658	 * are protected by the cpuhp_state_mutex and an empty slot is identified
1659 * by having no name assigned.
1660 */
1661static int cpuhp_reserve_state(enum cpuhp_state state)
1662{
1663 enum cpuhp_state i;
1664
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001665 for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
1666 if (cpuhp_ap_states[i].name)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001667 continue;
1668
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001669 cpuhp_ap_states[i].name = "Reserved";
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001670 return i;
1671 }
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001672 WARN(1, "No more dynamic states available for CPU hotplug\n");
1673 return -ENOSPC;
1674}
1675
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001676int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1677 bool invoke)
1678{
1679 struct cpuhp_step *sp;
1680 int cpu;
1681 int ret;
1682
1683 sp = cpuhp_get_step(state);
1684 if (sp->multi_instance == false)
1685 return -EINVAL;
1686
1687 get_online_cpus();
Sebastian Andrzej Siewior7ad6de42017-03-14 16:06:45 +01001688 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001689
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001690 if (!invoke || !sp->startup.multi)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001691 goto add_node;
1692
1693 /*
1694 * Try to call the startup callback for each present cpu
1695 * depending on the hotplug state of the cpu.
1696 */
1697 for_each_present_cpu(cpu) {
1698 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1699 int cpustate = st->state;
1700
1701 if (cpustate < state)
1702 continue;
1703
1704 ret = cpuhp_issue_call(cpu, state, true, node);
1705 if (ret) {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001706 if (sp->teardown.multi)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001707 cpuhp_rollback_install(cpu, state, node);
1708 goto err;
1709 }
1710 }
1711add_node:
1712 ret = 0;
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001713 hlist_add_head(node, &sp->list);
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001714
1715err:
Sebastian Andrzej Siewior7ad6de42017-03-14 16:06:45 +01001716 mutex_unlock(&cpuhp_state_mutex);
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001717 put_online_cpus();
1718 return ret;
1719}
1720EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
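
/*
 * Illustrative sketch, not part of this file: multi-instance states are set
 * up once via the cpuhp_setup_state_multi() wrapper from <linux/cpuhotplug.h>
 * and each instance then hooks itself in with cpuhp_state_add_instance(),
 * which invokes the startup callback on every present CPU that has already
 * reached the state.  All "example_" names are hypothetical.
 *
 *	struct example_dev {
 *		struct hlist_node node;
 *	};
 *
 *	static enum cpuhp_state example_state;
 *
 *	static int example_cpu_online(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct example_dev *dev = container_of(node, struct example_dev, node);
 *
 *		return example_setup_on_cpu(dev, cpu);
 *	}
 *
 *	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "example/dev:online",
 *				      example_cpu_online, NULL);
 *	if (ret < 0)
 *		return ret;
 *	example_state = ret;
 *
 *	ret = cpuhp_state_add_instance(example_state, &dev->node);
 */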
1721
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001722/**
1723 * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state
1724 * @state: The state to setup
1725 * @invoke: If true, the startup function is invoked for cpus where
1726 * cpu state >= @state
1727 * @startup: startup callback function
1728 * @teardown: teardown callback function
1729 *
1730 * Returns 0 if successful, otherwise a proper error code
1731 */
1732int __cpuhp_setup_state(enum cpuhp_state state,
1733 const char *name, bool invoke,
1734 int (*startup)(unsigned int cpu),
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001735 int (*teardown)(unsigned int cpu),
1736 bool multi_instance)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001737{
1738 int cpu, ret = 0;
1739 int dyn_state = 0;
1740
1741 if (cpuhp_cb_check(state) || !name)
1742 return -EINVAL;
1743
1744 get_online_cpus();
Sebastian Andrzej Siewior7ad6de42017-03-14 16:06:45 +01001745 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001746
1747 /* currently assignments for the ONLINE state are possible */
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001748 if (state == CPUHP_AP_ONLINE_DYN) {
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001749 dyn_state = 1;
1750 ret = cpuhp_reserve_state(state);
1751 if (ret < 0)
1752 goto out;
1753 state = ret;
1754 }
1755
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001756 cpuhp_store_callbacks(state, name, startup, teardown, multi_instance);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001757
1758 if (!invoke || !startup)
1759 goto out;
1760
1761 /*
1762 * Try to call the startup callback for each present cpu
1763 * depending on the hotplug state of the cpu.
1764 */
1765 for_each_present_cpu(cpu) {
1766 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1767 int cpustate = st->state;
1768
1769 if (cpustate < state)
1770 continue;
1771
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001772 ret = cpuhp_issue_call(cpu, state, true, NULL);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001773 if (ret) {
Thomas Gleixnera7246322016-08-12 19:49:38 +02001774 if (teardown)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001775 cpuhp_rollback_install(cpu, state, NULL);
1776 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001777 goto out;
1778 }
1779 }
1780out:
Sebastian Andrzej Siewior7ad6de42017-03-14 16:06:45 +01001781 mutex_unlock(&cpuhp_state_mutex);
1782
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001783 put_online_cpus();
1784 if (!ret && dyn_state)
1785 return state;
1786 return ret;
1787}
1788EXPORT_SYMBOL(__cpuhp_setup_state);
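
/*
 * Illustrative sketch, not part of this file: typical use of the dynamic
 * online state space through the cpuhp_setup_state() wrapper from
 * <linux/cpuhotplug.h>.  With CPUHP_AP_ONLINE_DYN the returned value is the
 * state that was actually allocated; it is needed again for removal.  The
 * "example_" names are hypothetical.
 *
 *	static int example_online(unsigned int cpu)
 *	{
 *		return example_start_per_cpu_work(cpu);
 *	}
 *
 *	static int example_offline(unsigned int cpu)
 *	{
 *		example_stop_per_cpu_work(cpu);
 *		return 0;
 *	}
 *
 *	int state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
 *				      "example/subsys:online",
 *				      example_online, example_offline);
 *	if (state < 0)
 *		return state;
 */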
1789
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001790int __cpuhp_state_remove_instance(enum cpuhp_state state,
1791 struct hlist_node *node, bool invoke)
1792{
1793 struct cpuhp_step *sp = cpuhp_get_step(state);
1794 int cpu;
1795
1796 BUG_ON(cpuhp_cb_check(state));
1797
1798 if (!sp->multi_instance)
1799 return -EINVAL;
1800
1801 get_online_cpus();
Sebastian Andrzej Siewior7ad6de42017-03-14 16:06:45 +01001802 mutex_lock(&cpuhp_state_mutex);
1803
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001804 if (!invoke || !cpuhp_get_teardown_cb(state))
1805 goto remove;
1806 /*
1807 * Call the teardown callback for each present cpu depending
1808 * on the hotplug state of the cpu. This function is not
1809 * allowed to fail currently!
1810 */
1811 for_each_present_cpu(cpu) {
1812 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1813 int cpustate = st->state;
1814
1815 if (cpustate >= state)
1816 cpuhp_issue_call(cpu, state, false, node);
1817 }
1818
1819remove:
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001820 hlist_del(node);
1821 mutex_unlock(&cpuhp_state_mutex);
1822 put_online_cpus();
1823
1824 return 0;
1825}
1826EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001827/**
1828 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
1829 * @state: The state to remove
1830 * @invoke: If true, the teardown function is invoked for cpus where
1831 * cpu state >= @state
1832 *
1833 * The teardown callback is currently not allowed to fail. Think
1834 * about module removal!
1835 */
1836void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1837{
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001838 struct cpuhp_step *sp = cpuhp_get_step(state);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001839 int cpu;
1840
1841 BUG_ON(cpuhp_cb_check(state));
1842
1843 get_online_cpus();
Sebastian Andrzej Siewior7ad6de42017-03-14 16:06:45 +01001844 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001845
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001846 if (sp->multi_instance) {
1847 WARN(!hlist_empty(&sp->list),
1848 "Error: Removing state %d which has instances left.\n",
1849 state);
1850 goto remove;
1851 }
1852
Thomas Gleixnera7246322016-08-12 19:49:38 +02001853 if (!invoke || !cpuhp_get_teardown_cb(state))
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001854 goto remove;
1855
1856 /*
1857 * Call the teardown callback for each present cpu depending
1858 * on the hotplug state of the cpu. This function is not
1859 * allowed to fail currently!
1860 */
1861 for_each_present_cpu(cpu) {
1862 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1863 int cpustate = st->state;
1864
1865 if (cpustate >= state)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001866 cpuhp_issue_call(cpu, state, false, NULL);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001867 }
1868remove:
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001869 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
Sebastian Andrzej Siewior7ad6de42017-03-14 16:06:45 +01001870 mutex_unlock(&cpuhp_state_mutex);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001871 put_online_cpus();
1872}
1873EXPORT_SYMBOL(__cpuhp_remove_state);
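
/*
 * Illustrative sketch, not part of this file: the matching removal, e.g. on
 * module exit.  Because teardown may not fail, the cpuhp_remove_state()
 * wrapper returns void.  "example_state" is the value returned by the
 * hypothetical cpuhp_setup_state() call sketched above.
 *
 *	static void example_exit(void)
 *	{
 *		cpuhp_remove_state(example_state);
 *	}
 */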
1874
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001875#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
1876static ssize_t show_cpuhp_state(struct device *dev,
1877 struct device_attribute *attr, char *buf)
1878{
1879 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1880
1881 return sprintf(buf, "%d\n", st->state);
1882}
1883static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
1884
Thomas Gleixner757c9892016-02-26 18:43:32 +00001885static ssize_t write_cpuhp_target(struct device *dev,
1886 struct device_attribute *attr,
1887 const char *buf, size_t count)
1888{
1889 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1890 struct cpuhp_step *sp;
1891 int target, ret;
1892
1893 ret = kstrtoint(buf, 10, &target);
1894 if (ret)
1895 return ret;
1896
1897#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
1898 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
1899 return -EINVAL;
1900#else
1901 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
1902 return -EINVAL;
1903#endif
1904
1905 ret = lock_device_hotplug_sysfs();
1906 if (ret)
1907 return ret;
1908
1909 mutex_lock(&cpuhp_state_mutex);
1910 sp = cpuhp_get_step(target);
1911 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1912 mutex_unlock(&cpuhp_state_mutex);
1913 if (ret)
Sebastian Andrzej Siewior106c77e2017-06-02 16:27:14 +02001914 goto out;
Thomas Gleixner757c9892016-02-26 18:43:32 +00001915
1916 if (st->state < target)
1917 ret = do_cpu_up(dev->id, target);
1918 else
1919 ret = do_cpu_down(dev->id, target);
Sebastian Andrzej Siewior106c77e2017-06-02 16:27:14 +02001920out:
Thomas Gleixner757c9892016-02-26 18:43:32 +00001921 unlock_device_hotplug();
1922 return ret ? ret : count;
1923}
1924
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001925static ssize_t show_cpuhp_target(struct device *dev,
1926 struct device_attribute *attr, char *buf)
1927{
1928 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1929
1930 return sprintf(buf, "%d\n", st->target);
1931}
Thomas Gleixner757c9892016-02-26 18:43:32 +00001932static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
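
/*
 * Illustrative sketch, not part of this file: the "state" and "target"
 * attributes above appear per CPU under /sys/devices/system/cpu/cpuN/hotplug/.
 * The numeric state values are configuration dependent; the global "states"
 * file defined below lists the valid numbers and names.  Example userspace
 * interaction (paths shown as an assumption for illustration):
 *
 *	# cat /sys/devices/system/cpu/hotplug/states
 *	# cat /sys/devices/system/cpu/cpu1/hotplug/state
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
 *
 * Writing 0 (CPUHP_OFFLINE) requests a full teardown of cpu1; writing the
 * CPUHP_ONLINE value brings it back up.
 */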
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001933
1934static struct attribute *cpuhp_cpu_attrs[] = {
1935 &dev_attr_state.attr,
1936 &dev_attr_target.attr,
1937 NULL
1938};
1939
1940static struct attribute_group cpuhp_cpu_attr_group = {
1941 .attrs = cpuhp_cpu_attrs,
1942 .name = "hotplug",
1943 NULL
1944};
1945
1946static ssize_t show_cpuhp_states(struct device *dev,
1947 struct device_attribute *attr, char *buf)
1948{
1949 ssize_t cur, res = 0;
1950 int i;
1951
1952 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixner757c9892016-02-26 18:43:32 +00001953 for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001954 struct cpuhp_step *sp = cpuhp_get_step(i);
1955
1956 if (sp->name) {
1957 cur = sprintf(buf, "%3d: %s\n", i, sp->name);
1958 buf += cur;
1959 res += cur;
1960 }
1961 }
1962 mutex_unlock(&cpuhp_state_mutex);
1963 return res;
1964}
1965static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
1966
1967static struct attribute *cpuhp_cpu_root_attrs[] = {
1968 &dev_attr_states.attr,
1969 NULL
1970};
1971
1972static struct attribute_group cpuhp_cpu_root_attr_group = {
1973 .attrs = cpuhp_cpu_root_attrs,
1974 .name = "hotplug",
1975 NULL
1976};
1977
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02001978#ifdef CONFIG_HOTPLUG_SMT
1979
1980static const char *smt_states[] = {
1981 [CPU_SMT_ENABLED] = "on",
1982 [CPU_SMT_DISABLED] = "off",
1983 [CPU_SMT_FORCE_DISABLED] = "forceoff",
1984 [CPU_SMT_NOT_SUPPORTED] = "notsupported",
1985};
1986
1987static ssize_t
1988show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
1989{
1990 return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
1991}
1992
1993static void cpuhp_offline_cpu_device(unsigned int cpu)
1994{
1995 struct device *dev = get_cpu_device(cpu);
1996
1997 dev->offline = true;
1998 /* Tell user space about the state change */
1999 kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2000}
2001
Thomas Gleixnere7cda2f2018-07-07 11:40:18 +02002002static void cpuhp_online_cpu_device(unsigned int cpu)
2003{
2004 struct device *dev = get_cpu_device(cpu);
2005
2006 dev->offline = false;
2007 /* Tell user space about the state change */
2008 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2009}
2010
Jiri Kosina5bdc5362019-05-30 00:09:39 +02002011int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02002012{
2013 int cpu, ret = 0;
2014
2015 cpu_maps_update_begin();
2016 for_each_online_cpu(cpu) {
2017 if (topology_is_primary_thread(cpu))
2018 continue;
2019 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2020 if (ret)
2021 break;
2022 /*
2023 * As this needs to hold the cpu maps lock it's impossible
2024 * to call device_offline() because that ends up calling
2025 * cpu_down() which takes cpu maps lock. cpu maps lock
2026 * needs to be held as this might race against in kernel
2027 * abusers of the hotplug machinery (thermal management).
2028 *
2029 * So nothing would update device:offline state. That would
2030 * leave the sysfs entry stale and prevent onlining after
2031 * smt control has been changed to 'off' again. This is
2032 * called under the sysfs hotplug lock, so it is properly
2033 * serialized against the regular offline usage.
2034 */
2035 cpuhp_offline_cpu_device(cpu);
2036 }
Jiri Kosinab410c572018-09-25 14:38:55 +02002037 if (!ret) {
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02002038 cpu_smt_control = ctrlval;
Jiri Kosinab410c572018-09-25 14:38:55 +02002039 arch_smt_update();
2040 }
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02002041 cpu_maps_update_done();
2042 return ret;
2043}
2044
Jiri Kosina5bdc5362019-05-30 00:09:39 +02002045int cpuhp_smt_enable(void)
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02002046{
Thomas Gleixnere7cda2f2018-07-07 11:40:18 +02002047 int cpu, ret = 0;
2048
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02002049 cpu_maps_update_begin();
2050 cpu_smt_control = CPU_SMT_ENABLED;
Jiri Kosinab410c572018-09-25 14:38:55 +02002051 arch_smt_update();
Thomas Gleixnere7cda2f2018-07-07 11:40:18 +02002052 for_each_present_cpu(cpu) {
2053 /* Skip online CPUs and CPUs on offline nodes */
2054 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2055 continue;
2056 ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2057 if (ret)
2058 break;
2059 /* See comment in cpuhp_smt_disable() */
2060 cpuhp_online_cpu_device(cpu);
2061 }
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02002062 cpu_maps_update_done();
Thomas Gleixnere7cda2f2018-07-07 11:40:18 +02002063 return ret;
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02002064}
2065
2066static ssize_t
2067store_smt_control(struct device *dev, struct device_attribute *attr,
2068 const char *buf, size_t count)
2069{
2070 int ctrlval, ret;
2071
2072 if (sysfs_streq(buf, "on"))
2073 ctrlval = CPU_SMT_ENABLED;
2074 else if (sysfs_streq(buf, "off"))
2075 ctrlval = CPU_SMT_DISABLED;
2076 else if (sysfs_streq(buf, "forceoff"))
2077 ctrlval = CPU_SMT_FORCE_DISABLED;
2078 else
2079 return -EINVAL;
2080
2081 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2082 return -EPERM;
2083
2084 if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2085 return -ENODEV;
2086
2087 ret = lock_device_hotplug_sysfs();
2088 if (ret)
2089 return ret;
2090
2091 if (ctrlval != cpu_smt_control) {
2092 switch (ctrlval) {
2093 case CPU_SMT_ENABLED:
Thomas Gleixnere7cda2f2018-07-07 11:40:18 +02002094 ret = cpuhp_smt_enable();
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02002095 break;
2096 case CPU_SMT_DISABLED:
2097 case CPU_SMT_FORCE_DISABLED:
2098 ret = cpuhp_smt_disable(ctrlval);
2099 break;
2100 }
2101 }
2102
2103 unlock_device_hotplug();
2104 return ret ? ret : count;
2105}
2106static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
2107
2108static ssize_t
2109show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
2110{
2111 bool active = topology_max_smt_threads() > 1;
2112
2113 return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
2114}
2115static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
2116
2117static struct attribute *cpuhp_smt_attrs[] = {
2118 &dev_attr_control.attr,
2119 &dev_attr_active.attr,
2120 NULL
2121};
2122
2123static const struct attribute_group cpuhp_smt_attr_group = {
2124 .attrs = cpuhp_smt_attrs,
2125 .name = "smt",
2126 NULL
2127};
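
/*
 * Illustrative sketch, not part of this file: the group above is exposed as
 * /sys/devices/system/cpu/smt/ with "control" and "active" files.  Valid
 * writes to "control" are the strings parsed in store_smt_control() above;
 * "forceoff" is sticky and cannot be reverted at runtime.  Example userspace
 * interaction:
 *
 *	# cat /sys/devices/system/cpu/smt/active
 *	# echo off > /sys/devices/system/cpu/smt/control
 *	# echo on > /sys/devices/system/cpu/smt/control
 */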
2128
2129static int __init cpu_smt_state_init(void)
2130{
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02002131 return sysfs_create_group(&cpu_subsys.dev_root->kobj,
2132 &cpuhp_smt_attr_group);
2133}
2134
2135#else
2136static inline int cpu_smt_state_init(void) { return 0; }
2137#endif
2138
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00002139static int __init cpuhp_sysfs_init(void)
2140{
2141 int cpu, ret;
2142
Thomas Gleixnerf37486c2018-05-29 17:48:27 +02002143 ret = cpu_smt_state_init();
2144 if (ret)
2145 return ret;
2146
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00002147 ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2148 &cpuhp_cpu_root_attr_group);
2149 if (ret)
2150 return ret;
2151
2152 for_each_possible_cpu(cpu) {
2153 struct device *dev = get_cpu_device(cpu);
2154
2155 if (!dev)
2156 continue;
2157 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2158 if (ret)
2159 return ret;
2160 }
2161 return 0;
2162}
2163device_initcall(cpuhp_sysfs_init);
2164#endif
2165
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002166/*
2167 * cpu_bit_bitmap[] is a special, "compressed" data structure that
2168 * represents all NR_CPUS bits binary values of 1<<nr.
2169 *
Rusty Russelle0b582e2009-01-01 10:12:28 +10302170 * It is used by cpumask_of() to get a constant address to a CPU
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002171 * mask value that has a single bit set only.
2172 */
Mike Travisb8d317d2008-07-24 18:21:29 -07002173
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002174/* cpu_bit_bitmap[0] is empty - so we can back into it */
Michael Rodriguez4d519852011-03-22 16:34:07 -07002175#define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002176#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2177#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2178#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
Mike Travisb8d317d2008-07-24 18:21:29 -07002179
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002180const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
Mike Travisb8d317d2008-07-24 18:21:29 -07002181
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002182 MASK_DECLARE_8(0), MASK_DECLARE_8(8),
2183 MASK_DECLARE_8(16), MASK_DECLARE_8(24),
2184#if BITS_PER_LONG > 32
2185 MASK_DECLARE_8(32), MASK_DECLARE_8(40),
2186 MASK_DECLARE_8(48), MASK_DECLARE_8(56),
Mike Travisb8d317d2008-07-24 18:21:29 -07002187#endif
2188};
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002189EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
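
/*
 * Illustrative sketch, not part of this file: roughly how <linux/cpumask.h>
 * turns the table above into a constant single-bit mask.  Starting from row
 * 1 + cpu % BITS_PER_LONG and stepping back cpu / BITS_PER_LONG longs yields
 * an address whose cpumask view has exactly bit "cpu" set, so the overlapping
 * rows stand in for one full cpumask per CPU.  Paraphrased, not a verbatim
 * copy of the header:
 *
 *	static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
 *	{
 *		const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *
 *		p -= cpu / BITS_PER_LONG;
 *		return to_cpumask(p);
 *	}
 */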
Rusty Russell2d3854a2008-11-05 13:39:10 +11002190
2191const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2192EXPORT_SYMBOL(cpu_all_bits);
Rusty Russellb3199c02008-12-30 09:05:14 +10302193
2194#ifdef CONFIG_INIT_ALL_POSSIBLE
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002195struct cpumask __cpu_possible_mask __read_mostly
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08002196 = {CPU_BITS_ALL};
Rusty Russellb3199c02008-12-30 09:05:14 +10302197#else
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002198struct cpumask __cpu_possible_mask __read_mostly;
Rusty Russellb3199c02008-12-30 09:05:14 +10302199#endif
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002200EXPORT_SYMBOL(__cpu_possible_mask);
Rusty Russellb3199c02008-12-30 09:05:14 +10302201
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002202struct cpumask __cpu_online_mask __read_mostly;
2203EXPORT_SYMBOL(__cpu_online_mask);
Rusty Russellb3199c02008-12-30 09:05:14 +10302204
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002205struct cpumask __cpu_present_mask __read_mostly;
2206EXPORT_SYMBOL(__cpu_present_mask);
Rusty Russellb3199c02008-12-30 09:05:14 +10302207
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002208struct cpumask __cpu_active_mask __read_mostly;
2209EXPORT_SYMBOL(__cpu_active_mask);
Rusty Russell3fa41522008-12-30 09:05:16 +10302210
Rusty Russell3fa41522008-12-30 09:05:16 +10302211void init_cpu_present(const struct cpumask *src)
2212{
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08002213 cpumask_copy(&__cpu_present_mask, src);
Rusty Russell3fa41522008-12-30 09:05:16 +10302214}
2215
2216void init_cpu_possible(const struct cpumask *src)
2217{
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08002218 cpumask_copy(&__cpu_possible_mask, src);
Rusty Russell3fa41522008-12-30 09:05:16 +10302219}
2220
2221void init_cpu_online(const struct cpumask *src)
2222{
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08002223 cpumask_copy(&__cpu_online_mask, src);
Rusty Russell3fa41522008-12-30 09:05:16 +10302224}
Thomas Gleixnercff7d372016-02-26 18:43:28 +00002225
2226/*
2227 * Activate the first processor.
2228 */
2229void __init boot_cpu_init(void)
2230{
2231 int cpu = smp_processor_id();
2232
2233 /* Mark the boot cpu "present", "online" etc for SMP and UP case */
2234 set_cpu_online(cpu, true);
2235 set_cpu_active(cpu, true);
2236 set_cpu_present(cpu, true);
2237 set_cpu_possible(cpu, true);
2238}
2239
2240/*
2241 * Must be called _AFTER_ setting up the per_cpu areas
2242 */
Linus Torvalds6bb53ee2018-08-12 12:19:42 -07002243void __init boot_cpu_hotplug_init(void)
Thomas Gleixnercff7d372016-02-26 18:43:28 +00002244{
Abel Vesaaee08612018-08-15 00:26:00 +03002245#ifdef CONFIG_SMP
Thomas Gleixner8438e492018-06-29 16:05:48 +02002246 this_cpu_write(cpuhp_state.booted_once, true);
Abel Vesaaee08612018-08-15 00:26:00 +03002247#endif
Thomas Gleixner8438e492018-06-29 16:05:48 +02002248 this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
Thomas Gleixnercff7d372016-02-26 18:43:28 +00002249}
Todd Poynor5ee34122011-06-15 17:21:57 -07002250
Tyler Hickse2bd0772019-11-04 12:22:02 +01002251/*
2252 * These are used for a global "mitigations=" cmdline option for toggling
2253 * optional CPU mitigations.
2254 */
2255enum cpu_mitigations {
2256 CPU_MITIGATIONS_OFF,
2257 CPU_MITIGATIONS_AUTO,
2258 CPU_MITIGATIONS_AUTO_NOSMT,
2259};
2260
2261static enum cpu_mitigations cpu_mitigations __ro_after_init =
2262 CPU_MITIGATIONS_AUTO;
Josh Poimboeufedda9c32019-04-12 15:39:28 -05002263
2264static int __init mitigations_parse_cmdline(char *arg)
2265{
2266 if (!strcmp(arg, "off"))
2267 cpu_mitigations = CPU_MITIGATIONS_OFF;
2268 else if (!strcmp(arg, "auto"))
2269 cpu_mitigations = CPU_MITIGATIONS_AUTO;
2270 else if (!strcmp(arg, "auto,nosmt"))
2271 cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
Geert Uytterhoeven0cbb0ae2019-05-16 09:09:35 +02002272 else
2273 pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
2274 arg);
Josh Poimboeufedda9c32019-04-12 15:39:28 -05002275
2276 return 0;
2277}
2278early_param("mitigations", mitigations_parse_cmdline);
Greg Kroah-Hartmanef4a5762019-05-14 21:04:42 +02002279
Tyler Hickse2bd0772019-11-04 12:22:02 +01002280/* mitigations=off */
2281bool cpu_mitigations_off(void)
2282{
2283 return cpu_mitigations == CPU_MITIGATIONS_OFF;
2284}
2285EXPORT_SYMBOL_GPL(cpu_mitigations_off);
2286
2287/* mitigations=auto,nosmt */
2288bool cpu_mitigations_auto_nosmt(void)
2289{
2290 return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
2291}
2292EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
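
/*
 * Illustrative sketch, not part of this file: the command line accepts
 * "mitigations=off", "mitigations=auto" (the default) and
 * "mitigations=auto,nosmt".  Architecture mitigation selection is expected to
 * consult the two helpers above; the "example_" names below are hypothetical.
 *
 *	static void example_select_mitigation(void)
 *	{
 *		if (cpu_mitigations_off())
 *			return;
 *
 *		example_enable_default_mitigation();
 *
 *		if (cpu_mitigations_auto_nosmt())
 *			example_disable_smt();
 *	}
 */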
Greg Kroah-Hartman258971b2019-11-16 11:05:12 +01002293
Todd Poynor5ee34122011-06-15 17:21:57 -07002294static ATOMIC_NOTIFIER_HEAD(idle_notifier);
2295
2296void idle_notifier_register(struct notifier_block *n)
2297{
2298 atomic_notifier_chain_register(&idle_notifier, n);
2299}
2300EXPORT_SYMBOL_GPL(idle_notifier_register);
2301
2302void idle_notifier_unregister(struct notifier_block *n)
2303{
2304 atomic_notifier_chain_unregister(&idle_notifier, n);
2305}
2306EXPORT_SYMBOL_GPL(idle_notifier_unregister);
2307
2308void idle_notifier_call_chain(unsigned long val)
2309{
2310 atomic_notifier_call_chain(&idle_notifier, val, NULL);
2311}
2312EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
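
/*
 * Illustrative sketch, not part of this file: consumers hook into the idle
 * notifier chain with a standard notifier_block.  Which "val" values the
 * architecture passes to idle_notifier_call_chain() (e.g. idle entry/exit
 * events) is defined by the caller, not here; the "example_" names are
 * hypothetical.
 *
 *	static int example_idle_notify(struct notifier_block *nb,
 *				       unsigned long val, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_idle_nb = {
 *		.notifier_call = example_idle_notify,
 *	};
 *
 *	idle_notifier_register(&example_idle_nb);
 */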