/*
 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
 *
 * Created by: Nicolas Pitre, March 2012
 * Copyright: (C) 2012-2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cpu_pm.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/mcpm.h>
#include <asm/bL_switcher.h>


/*
 * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
 * __attribute_const__ and we don't want the compiler to assume any
 * constness here as the value _does_ change along some code paths.
 */

static int read_mpidr(void)
{
	unsigned int id;
	asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
	return id & MPIDR_HWID_BITMASK;
}

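/*
 * For reference, the MPIDR affinity fields used throughout this file
 * decompose as follows (illustrative values on a hypothetical dual
 * cluster system):
 *
 *	MPIDR 0x000: MPIDR_AFFINITY_LEVEL(mpidr, 1) == 0 (cluster),
 *	             MPIDR_AFFINITY_LEVEL(mpidr, 0) == 0 (cpu)
 *	MPIDR 0x101: cluster 1, cpu 1
 */
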
/*
 * bL switcher core code.
 */

static void bL_do_switch(void *_unused)
{
	unsigned int mpidr, cpuid, clusterid, ob_cluster, ib_cluster;

	pr_debug("%s\n", __func__);

	mpidr = read_mpidr();
	cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	ob_cluster = clusterid;
	ib_cluster = clusterid ^ 1;

	/*
	 * Our state has been saved at this point. Let's release our
	 * inbound CPU.
	 */
	mcpm_set_entry_vector(cpuid, ib_cluster, cpu_resume);
	sev();

	/*
	 * From this point, we must assume that our counterpart CPU might
	 * have taken over in its parallel world already, as if execution
	 * just returned from cpu_suspend(). It is therefore important to
	 * be very careful not to make any change the other CPU is not
	 * expecting. This is why we need stack isolation.
	 *
	 * Fancy undercover tasks could be performed here. For now
	 * we have none.
	 */

	/* Let's put ourselves down. */
	mcpm_cpu_power_down();

	/* should never get here */
	BUG();
}

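/*
 * For reference: the inbound CPU was powered up by mcpm_cpu_power_up()
 * in bL_switch_to() below and is presumably waiting in the MCPM early
 * entry gate. Once bL_do_switch() publishes the cpu_resume vector and
 * issues sev(), it proceeds through cpu_resume, restores the context
 * saved by cpu_suspend() on the outbound side, and returns from
 * cpu_suspend() in bL_switch_to() while the outbound CPU powers itself
 * down.
 */
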
89/*
Nicolas Pitrec052de22012-11-27 15:55:33 -050090 * Stack isolation. To ensure 'current' remains valid, we just use another
91 * piece of our thread's stack space which should be fairly lightly used.
92 * The selected area starts just above the thread_info structure located
93 * at the very bottom of the stack, aligned to a cache line, and indexed
94 * with the cluster number.
Nicolas Pitre1c33be52012-04-12 02:56:10 -040095 */
Nicolas Pitrec052de22012-11-27 15:55:33 -050096#define STACK_SIZE 512
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
static int bL_switchpoint(unsigned long _arg)
{
	unsigned int mpidr = read_mpidr();
	unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	void *stack = current_thread_info() + 1;
	stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
	stack += clusterid * STACK_SIZE + STACK_SIZE;
	call_with_stack(bL_do_switch, (void *)_arg, stack);
	BUG();
}

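/*
 * Resulting stack placement, as a sketch (assuming the thread stack is
 * large enough to spare 2 * STACK_SIZE bytes above thread_info):
 *
 *	base = PTR_ALIGN(current_thread_info() + 1, L1_CACHE_BYTES)
 *	|
 *	v
 *	[ cluster 0 area (512 B) | cluster 1 area (512 B) ]
 *	                         ^                        ^
 *	              sp for clusterid 0       sp for clusterid 1
 *
 * call_with_stack() is handed the top of the selected area because ARM
 * stacks grow downwards, hence the trailing "+ STACK_SIZE" above.
 */
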
/*
 * Generic switcher interface
 */

static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];

/*
 * bL_switch_to - Switch to a specific cluster for the current CPU
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function must be called on the CPU to be switched.
 * Returns 0 on success, else a negative status code.
 */
static int bL_switch_to(unsigned int new_cluster_id)
{
	unsigned int mpidr, cpuid, clusterid, ob_cluster, ib_cluster, this_cpu;
	struct tick_device *tdev;
	enum clock_event_mode tdev_mode;
	int ret;

	mpidr = read_mpidr();
	cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	ob_cluster = clusterid;
	ib_cluster = clusterid ^ 1;

	if (new_cluster_id == clusterid)
		return 0;

	pr_debug("before switch: CPU %d in cluster %d\n", cpuid, clusterid);

	/* Close the gate for our entry vectors */
	mcpm_set_entry_vector(cpuid, ob_cluster, NULL);
	mcpm_set_entry_vector(cpuid, ib_cluster, NULL);

	/*
	 * Let's wake up the inbound CPU now in case it requires some delay
	 * to come online, but leave it gated in our entry vector code.
	 */
	ret = mcpm_cpu_power_up(cpuid, ib_cluster);
	if (ret) {
		pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
		return ret;
	}

	/*
	 * From this point we are entering the switch critical zone
	 * and can't take any interrupts anymore.
	 */
	local_irq_disable();
	local_fiq_disable();

	this_cpu = smp_processor_id();

	/* redirect GIC's SGIs to our counterpart */
	gic_migrate_target(bL_gic_id[cpuid][ib_cluster]);

	/*
	 * Raise an SGI on the inbound CPU to make sure it doesn't stall
	 * in a possible WFI, such as in mcpm_cpu_power_down().
	 */
	arch_send_wakeup_ipi_mask(cpumask_of(this_cpu));

	tdev = tick_get_device(this_cpu);
	if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
		tdev = NULL;
	if (tdev) {
		tdev_mode = tdev->evtdev->mode;
		clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
	}

	ret = cpu_pm_enter();

	/* we cannot tolerate errors at this point */
	if (ret)
		panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);

	/* Flip the cluster in the CPU logical map for this CPU. */
	cpu_logical_map(this_cpu) ^= (1 << 8);

	/* Let's do the actual CPU switch. */
	ret = cpu_suspend(0, bL_switchpoint);
	if (ret > 0)
		panic("%s: cpu_suspend() returned %d\n", __func__, ret);

	/* We are executing on the inbound CPU at this point */
	mpidr = read_mpidr();
	cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("after switch: CPU %d in cluster %d\n", cpuid, clusterid);
	BUG_ON(clusterid != ib_cluster);

	mcpm_cpu_powered_up();

	ret = cpu_pm_exit();

	if (tdev) {
		clockevents_set_mode(tdev->evtdev, tdev_mode);
		clockevents_program_event(tdev->evtdev,
					  tdev->evtdev->next_event, 1);
	}

	local_fiq_enable();
	local_irq_enable();

	if (ret)
		pr_err("%s exiting with error %d\n", __func__, ret);
	return ret;
}

struct bL_thread {
	struct task_struct *task;
	wait_queue_head_t wq;
	int wanted_cluster;
};

static struct bL_thread bL_threads[NR_CPUS];

static int bL_switcher_thread(void *arg)
{
	struct bL_thread *t = arg;
	struct sched_param param = { .sched_priority = 1 };
	int cluster;

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);

	do {
		if (signal_pending(current))
			flush_signals(current);
		wait_event_interruptible(t->wq,
				t->wanted_cluster != -1 ||
				kthread_should_stop());
		/* atomically consume any pending switch request */
		cluster = xchg(&t->wanted_cluster, -1);
		if (cluster != -1)
			bL_switch_to(cluster);
	} while (!kthread_should_stop());

	return 0;
}

static struct task_struct * __init bL_switcher_thread_create(int cpu, void *arg)
{
	struct task_struct *task;

	task = kthread_create_on_node(bL_switcher_thread, arg,
				      cpu_to_node(cpu), "kswitcher_%d", cpu);
	if (!IS_ERR(task)) {
		kthread_bind(task, cpu);
		wake_up_process(task);
	} else
		pr_err("%s failed for CPU %d\n", __func__, cpu);
	return task;
}

/*
 * bL_switch_request - Switch to a specific cluster for the given CPU
 *
 * @cpu: the CPU to switch
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function causes a cluster switch on the given CPU by waking up
 * the appropriate switcher thread. This function may or may not return
 * before the switch has occurred.
 */
int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
{
	struct bL_thread *t;

	if (cpu >= ARRAY_SIZE(bL_threads)) {
		pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
		return -EINVAL;
	}

	t = &bL_threads[cpu];
	if (IS_ERR(t->task))
		return PTR_ERR(t->task);
	if (!t->task)
		return -ESRCH;

	t->wanted_cluster = new_cluster_id;
	wake_up(&t->wq);
	return 0;
}
EXPORT_SYMBOL_GPL(bL_switch_request);

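/*
 * Example call from a hypothetical user (say, a cpufreq driver wanting
 * logical CPU 0 to run on cluster 1):
 *
 *	ret = bL_switch_request(0, 1);
 *	if (ret)
 *		pr_warn("switch request failed: %d\n", ret);
 *
 * Note the asynchronous semantics: a zero return only means the request
 * was handed to the switcher thread, not that the switch has completed.
 */
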
/*
 * Activation and configuration code.
 */

static cpumask_t bL_switcher_removed_logical_cpus;

static void __init bL_switcher_restore_cpus(void)
{
	int i;

	for_each_cpu(i, &bL_switcher_removed_logical_cpus)
		cpu_up(i);
}

static int __init bL_switcher_halve_cpus(void)
{
	int cpu, cluster, i, ret;
	cpumask_t cluster_mask[2], common_mask;

	cpumask_clear(&bL_switcher_removed_logical_cpus);
	cpumask_clear(&cluster_mask[0]);
	cpumask_clear(&cluster_mask[1]);

	for_each_online_cpu(i) {
		cpu = cpu_logical_map(i) & 0xff;
		cluster = (cpu_logical_map(i) >> 8) & 0xff;
		if (cluster >= 2) {
			pr_err("%s: only dual cluster systems are supported\n", __func__);
			return -EINVAL;
		}
		cpumask_set_cpu(cpu, &cluster_mask[cluster]);
	}

	if (!cpumask_and(&common_mask, &cluster_mask[0], &cluster_mask[1])) {
		pr_err("%s: no common set of CPUs\n", __func__);
		return -EINVAL;
	}

	for_each_online_cpu(i) {
		cpu = cpu_logical_map(i) & 0xff;
		cluster = (cpu_logical_map(i) >> 8) & 0xff;

		if (cpumask_test_cpu(cpu, &common_mask)) {
			/* Let's take note of the GIC ID for this CPU */
			int gic_id = gic_get_cpu_id(i);
			if (gic_id < 0) {
				pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
				return -EINVAL;
			}
			bL_gic_id[cpu][cluster] = gic_id;
			pr_info("GIC ID for CPU %u cluster %u is %u\n",
				cpu, cluster, gic_id);

			/*
			 * We keep only those logical CPUs whose number
			 * is equal to their physical CPU number. This is
			 * not perfect but good enough for now.
			 */
			if (cpu == i)
				continue;
		}

		ret = cpu_down(i);
		if (ret) {
			bL_switcher_restore_cpus();
			return ret;
		}
		cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
	}

	return 0;
}

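/*
 * A worked example of the halving logic, on a hypothetical system where
 * logical CPUs 0-1 are cluster 0 physical CPUs {0, 1} and logical CPUs
 * 2-4 are cluster 1 physical CPUs {0, 1, 2}: the common set is {0, 1},
 * GIC IDs get recorded for physical CPUs 0 and 1 in both clusters, and
 * only logical CPUs 0 and 1 (whose logical and physical numbers match)
 * stay online; logical CPUs 2-4 are taken down with cpu_down().
 */
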
static int __init bL_switcher_init(void)
{
	int cpu, ret;

	pr_info("big.LITTLE switcher initializing\n");

	if (MAX_NR_CLUSTERS != 2) {
		pr_err("%s: only dual cluster systems are supported\n", __func__);
		return -EINVAL;
	}

	cpu_hotplug_driver_lock();
	ret = bL_switcher_halve_cpus();
	if (ret) {
		cpu_hotplug_driver_unlock();
		return ret;
	}

	for_each_online_cpu(cpu) {
		struct bL_thread *t = &bL_threads[cpu];
		init_waitqueue_head(&t->wq);
		t->wanted_cluster = -1;
		t->task = bL_switcher_thread_create(cpu, t);
	}
	cpu_hotplug_driver_unlock();

	pr_info("big.LITTLE switcher initialized\n");
	return 0;
}

late_initcall(bL_switcher_init);