blob: 9d5f7b04025d858327b5c4cc018d312783134028 [file] [log] [blame]
Thomas Gleixner38498a62012-04-20 13:05:44 +00001/*
2 * Common SMP CPU bringup/teardown functions
3 */
Thomas Gleixnerf97f8f02012-07-16 10:42:36 +00004#include <linux/cpu.h>
Thomas Gleixner29d5e042012-04-20 13:05:45 +00005#include <linux/err.h>
6#include <linux/smp.h>
Thomas Gleixner38498a62012-04-20 13:05:44 +00007#include <linux/init.h>
Thomas Gleixnerf97f8f02012-07-16 10:42:36 +00008#include <linux/list.h>
9#include <linux/slab.h>
Thomas Gleixner29d5e042012-04-20 13:05:45 +000010#include <linux/sched.h>
Thomas Gleixnerf97f8f02012-07-16 10:42:36 +000011#include <linux/export.h>
Thomas Gleixner29d5e042012-04-20 13:05:45 +000012#include <linux/percpu.h>
Thomas Gleixnerf97f8f02012-07-16 10:42:36 +000013#include <linux/kthread.h>
14#include <linux/smpboot.h>
Thomas Gleixner38498a62012-04-20 13:05:44 +000015
16#include "smpboot.h"
17
#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and reuse
 * them.
 */
/* Cached idle task for each cpu; NULL until forked (or set for the boot cpu). */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

Suresh Siddha3bb5d2e2012-04-20 17:08:50 -070025struct task_struct * __cpuinit idle_thread_get(unsigned int cpu)
Thomas Gleixner29d5e042012-04-20 13:05:45 +000026{
27 struct task_struct *tsk = per_cpu(idle_threads, cpu);
28
29 if (!tsk)
Suresh Siddha3bb5d2e2012-04-20 17:08:50 -070030 return ERR_PTR(-ENOMEM);
Thomas Gleixner29d5e042012-04-20 13:05:45 +000031 init_idle(tsk, cpu);
32 return tsk;
33}
34
/*
 * Record the boot cpu's idle task. The boot cpu is already running in
 * its idle/init context, so no task is forked for it; cache "current"
 * so idle_thread_get() can hand it back on hotplug.
 */
void __init idle_thread_set_boot_cpu(void)
{
	per_cpu(idle_threads, smp_processor_id()) = current;
}
39
Srivatsa S. Bhat4a70d2d2012-05-24 20:41:00 +053040/**
41 * idle_init - Initialize the idle thread for a cpu
42 * @cpu: The cpu for which the idle thread should be initialized
43 *
44 * Creates the thread if it does not exist.
45 */
Suresh Siddha3bb5d2e2012-04-20 17:08:50 -070046static inline void idle_init(unsigned int cpu)
47{
48 struct task_struct *tsk = per_cpu(idle_threads, cpu);
49
50 if (!tsk) {
51 tsk = fork_idle(cpu);
52 if (IS_ERR(tsk))
53 pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
54 else
55 per_cpu(idle_threads, cpu) = tsk;
56 }
57}
58
Thomas Gleixner29d5e042012-04-20 13:05:45 +000059/**
Srivatsa S. Bhat4a70d2d2012-05-24 20:41:00 +053060 * idle_threads_init - Initialize idle threads for all cpus
Thomas Gleixner29d5e042012-04-20 13:05:45 +000061 */
Suresh Siddha3bb5d2e2012-04-20 17:08:50 -070062void __init idle_threads_init(void)
Thomas Gleixner29d5e042012-04-20 13:05:45 +000063{
Srivatsa S. Bhatee74d132012-05-24 20:40:55 +053064 unsigned int cpu, boot_cpu;
65
66 boot_cpu = smp_processor_id();
Thomas Gleixner29d5e042012-04-20 13:05:45 +000067
Suresh Siddha3bb5d2e2012-04-20 17:08:50 -070068 for_each_possible_cpu(cpu) {
Srivatsa S. Bhatee74d132012-05-24 20:40:55 +053069 if (cpu != boot_cpu)
Suresh Siddha3bb5d2e2012-04-20 17:08:50 -070070 idle_init(cpu);
Thomas Gleixner29d5e042012-04-20 13:05:45 +000071 }
Thomas Gleixner29d5e042012-04-20 13:05:45 +000072}
Thomas Gleixner29d5e042012-04-20 13:05:45 +000073#endif
Thomas Gleixnerf97f8f02012-07-16 10:42:36 +000074
/* All registered hotplug thread descriptors; guarded by smpboot_threads_lock. */
static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);

/* Per-cpu instance data handed to smpboot_thread_fn(); freed on thread stop. */
struct smpboot_thread_data {
	unsigned int cpu;	/* cpu this thread instance is bound to */
	unsigned int status;	/* HP_THREAD_* life cycle state */
	struct smp_hotplug_thread *ht;	/* back pointer to the descriptor */
};

/* Life cycle states of a hotplug thread instance */
enum {
	HP_THREAD_NONE = 0,	/* ht->setup() has not run yet */
	HP_THREAD_ACTIVE,	/* setup/unpark done, thread operational */
	HP_THREAD_PARKED,	/* ht->park() done, thread parked */
};
89
/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data: thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when the thread is told to stop (the only exit path);
 * td is freed before returning, so it must not be touched afterwards.
 */
static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (1) {
		/*
		 * Go (potentially) to sleep. Preemption is disabled so the
		 * stop/park checks and the cpu-affinity BUG_ONs below are
		 * not invalidated by a migration in between.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		preempt_disable();
		if (kthread_should_stop()) {
			set_current_state(TASK_RUNNING);
			preempt_enable();
			/* Cleanup may run on another cpu; tell it whether td->cpu is still online */
			if (ht->cleanup)
				ht->cleanup(td->cpu, cpu_online(td->cpu));
			kfree(td);
			return 0;
		}

		if (kthread_should_park()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			/* Invoke the park callback only on an ACTIVE -> PARKED transition */
			if (ht->park && td->status == HP_THREAD_ACTIVE) {
				BUG_ON(td->cpu != smp_processor_id());
				ht->park(td->cpu);
				td->status = HP_THREAD_PARKED;
			}
			kthread_parkme();
			/* We might have been woken for stop */
			continue;
		}

		BUG_ON(td->cpu != smp_processor_id());

		/* Check for state change setup */
		switch (td->status) {
		case HP_THREAD_NONE:
			/* First wakeup: run the setup callback with preemption enabled */
			preempt_enable();
			if (ht->setup)
				ht->setup(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			preempt_disable();
			break;
		case HP_THREAD_PARKED:
			/* Woken after a park/unpark cycle: run the unpark callback */
			preempt_enable();
			if (ht->unpark)
				ht->unpark(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			preempt_disable();
			break;
		}

		if (!ht->thread_should_run(td->cpu)) {
			/* Nothing to do: sleep in the TASK_INTERRUPTIBLE state set above */
			preempt_enable();
			schedule();
		} else {
			set_current_state(TASK_RUNNING);
			preempt_enable();
			ht->thread_fn(td->cpu);
		}
	}
}
160
161static int
162__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
163{
164 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
165 struct smpboot_thread_data *td;
166
167 if (tsk)
168 return 0;
169
170 td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
171 if (!td)
172 return -ENOMEM;
173 td->cpu = cpu;
174 td->ht = ht;
175
176 tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
177 ht->thread_comm);
178 if (IS_ERR(tsk)) {
179 kfree(td);
180 return PTR_ERR(tsk);
181 }
182
183 get_task_struct(tsk);
184 *per_cpu_ptr(ht->store, cpu) = tsk;
185 return 0;
186}
187
188int smpboot_create_threads(unsigned int cpu)
189{
190 struct smp_hotplug_thread *cur;
191 int ret = 0;
192
193 mutex_lock(&smpboot_threads_lock);
194 list_for_each_entry(cur, &hotplug_threads, list) {
195 ret = __smpboot_create_thread(cur, cpu);
196 if (ret)
197 break;
198 }
199 mutex_unlock(&smpboot_threads_lock);
200 return ret;
201}
202
203static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
204{
205 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
206
207 kthread_unpark(tsk);
208}
209
210void smpboot_unpark_threads(unsigned int cpu)
211{
212 struct smp_hotplug_thread *cur;
213
214 mutex_lock(&smpboot_threads_lock);
215 list_for_each_entry(cur, &hotplug_threads, list)
216 smpboot_unpark_thread(cur, cpu);
217 mutex_unlock(&smpboot_threads_lock);
218}
219
220static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
221{
222 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
223
224 if (tsk)
225 kthread_park(tsk);
226}
227
228void smpboot_park_threads(unsigned int cpu)
229{
230 struct smp_hotplug_thread *cur;
231
232 mutex_lock(&smpboot_threads_lock);
233 list_for_each_entry_reverse(cur, &hotplug_threads, list)
234 smpboot_park_thread(cur, cpu);
235 mutex_unlock(&smpboot_threads_lock);
236}
237
238static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
239{
240 unsigned int cpu;
241
242 /* We need to destroy also the parked threads of offline cpus */
243 for_each_possible_cpu(cpu) {
244 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
245
246 if (tsk) {
247 kthread_stop(tsk);
248 put_task_struct(tsk);
249 *per_cpu_ptr(ht->store, cpu) = NULL;
250 }
251 }
252}
253
254/**
255 * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
256 * @plug_thread: Hotplug thread descriptor
257 *
258 * Creates and starts the threads on all online cpus.
259 */
260int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
261{
262 unsigned int cpu;
263 int ret = 0;
264
265 mutex_lock(&smpboot_threads_lock);
266 for_each_online_cpu(cpu) {
267 ret = __smpboot_create_thread(plug_thread, cpu);
268 if (ret) {
269 smpboot_destroy_threads(plug_thread);
270 goto out;
271 }
272 smpboot_unpark_thread(plug_thread, cpu);
273 }
274 list_add(&plug_thread->list, &hotplug_threads);
275out:
276 mutex_unlock(&smpboot_threads_lock);
277 return ret;
278}
279EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
280
/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread: Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	/* Block cpu hotplug; taken before smpboot_threads_lock (lock order) */
	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	/* Unlink first so hotplug paths no longer see the descriptor */
	list_del(&plug_thread->list);
	smpboot_destroy_threads(plug_thread);
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);