/*
 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
 *
 * Created by: Nicolas Pitre, March 2012
 * Copyright:  (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cpu_pm.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/mcpm.h>
#include <asm/bL_switcher.h>


/*
 * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
 * __attribute_const__ and we don't want the compiler to assume any
 * constness here as the value _does_ change along some code paths.
 */

static int read_mpidr(void)
{
        unsigned int id;
        asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
        return id & MPIDR_HWID_BITMASK;
}

/*
 * bL switcher core code.
 */

static void bL_do_switch(void *_unused)
{
        unsigned mpidr, cpuid, clusterid, ob_cluster, ib_cluster;

        pr_debug("%s\n", __func__);

        mpidr = read_mpidr();
        cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        ob_cluster = clusterid;
        ib_cluster = clusterid ^ 1;

        /*
         * Our state has been saved at this point.  Let's release our
         * inbound CPU.
         */
        mcpm_set_entry_vector(cpuid, ib_cluster, cpu_resume);
        sev();
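
        /*
         * The inbound CPU was powered up earlier by bL_switch_to() and
         * should by now be waiting in the MCPM early entry gate; the
         * event raised by sev() releases it through the vector installed
         * above, i.e. cpu_resume restoring the state just saved by
         * cpu_suspend().
         */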
70
71 /*
72 * From this point, we must assume that our counterpart CPU might
73 * have taken over in its parallel world already, as if execution
74 * just returned from cpu_suspend(). It is therefore important to
75 * be very careful not to make any change the other guy is not
76 * expecting. This is why we need stack isolation.
77 *
78 * Fancy under cover tasks could be performed here. For now
79 * we have none.
80 */
81
82 /* Let's put ourself down. */
83 mcpm_cpu_power_down();
84
85 /* should never get here */
86 BUG();
87}

/*
 * Stack isolation.  To ensure 'current' remains valid, we just use another
 * piece of our thread's stack space which should be fairly lightly used.
 * The selected area starts just above the thread_info structure located
 * at the very bottom of the stack, aligned to a cache line, and indexed
 * with the cluster number.
 */
#define STACK_SIZE 512
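/*
 * A sketch of the resulting layout, growing upward from the stack base
 * (assuming two clusters, as everywhere else in this code):
 *
 *    thread_info | pad to cache line | cluster 0 slot | cluster 1 slot
 *                                     <- STACK_SIZE -> <- STACK_SIZE ->
 */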
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
static int bL_switchpoint(unsigned long _arg)
{
        unsigned int mpidr = read_mpidr();
        unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        void *stack = current_thread_info() + 1;
        stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
        stack += clusterid * STACK_SIZE + STACK_SIZE;
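        /*
         * ARM stacks are full-descending, so call_with_stack() must be
         * handed the top of this cluster's slot: hence the extra
         * +STACK_SIZE above.
         */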
        call_with_stack(bL_do_switch, (void *)_arg, stack);
        BUG();
}

/*
 * Generic switcher interface
 */

/*
 * bL_switch_to - Switch to a specific cluster for the current CPU
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function must be called on the CPU to be switched.
 * Returns 0 on success, else a negative status code.
 */
static int bL_switch_to(unsigned int new_cluster_id)
{
        unsigned int mpidr, cpuid, clusterid, ob_cluster, ib_cluster, this_cpu;
        struct tick_device *tdev;
        enum clock_event_mode tdev_mode;
        int ret;

        mpidr = read_mpidr();
        cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        ob_cluster = clusterid;
        ib_cluster = clusterid ^ 1;

        if (new_cluster_id == clusterid)
                return 0;

        pr_debug("before switch: CPU %d in cluster %d\n", cpuid, clusterid);

        /* Close the gate for our entry vectors */
        mcpm_set_entry_vector(cpuid, ob_cluster, NULL);
        mcpm_set_entry_vector(cpuid, ib_cluster, NULL);

        /*
         * Let's wake up the inbound CPU now in case it requires some delay
         * to come online, but leave it gated in our entry vector code.
         */
        ret = mcpm_cpu_power_up(cpuid, ib_cluster);
        if (ret) {
                pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
                return ret;
        }

        /*
         * From this point we are entering the switch critical zone
         * and can't take any interrupts anymore.
         */
        local_irq_disable();
        local_fiq_disable();

        this_cpu = smp_processor_id();

        /* redirect GIC's SGIs to our counterpart */
        gic_migrate_target(cpuid + ib_cluster*4);
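        /*
         * Note: the 'cpuid + ib_cluster*4' encoding assumes the GIC CPU
         * interfaces are numbered cluster * 4 + cpu, i.e. at most 4 CPUs
         * per cluster, as on the dual-cluster platforms this code targets.
         */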

        /*
         * Raise an SGI on the inbound CPU to make sure it doesn't stall
         * in a possible WFI, such as in mcpm_cpu_power_down().
         */
        arch_send_wakeup_ipi_mask(cpumask_of(this_cpu));

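        /*
         * The local tick device is private to the physical CPU, so its
         * state cannot follow us across the switch: shut it down here and
         * restore its mode and pending event on the inbound side once the
         * switch is done.
         */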
        tdev = tick_get_device(this_cpu);
        if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
                tdev = NULL;
        if (tdev) {
                tdev_mode = tdev->evtdev->mode;
                clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
        }

        ret = cpu_pm_enter();

        /* we cannot tolerate errors at this point */
        if (ret)
                panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);

        /*
         * Flip the cluster in the CPU logical map for this CPU.
         * MPIDR Aff1 (the cluster ID) occupies bits [15:8], so XORing
         * bit 8 toggles between cluster 0 and cluster 1.
         */
        cpu_logical_map(this_cpu) ^= (1 << 8);

        /* Let's do the actual CPU switch. */
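        /*
         * cpu_suspend() saves this CPU's state and calls bL_switchpoint()
         * on the isolated stack.  On success it does not return here on
         * the outbound side: execution resumes, with ret == 0, on the
         * inbound CPU through the cpu_resume vector set in bL_do_switch().
         * A positive return value means the suspend machinery failed
         * before any switch took place.
         */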
        ret = cpu_suspend(0, bL_switchpoint);
        if (ret > 0)
                panic("%s: cpu_suspend() returned %d\n", __func__, ret);

        /* We are executing on the inbound CPU at this point */
        mpidr = read_mpidr();
        cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        pr_debug("after switch: CPU %d in cluster %d\n", cpuid, clusterid);
        BUG_ON(clusterid != ib_cluster);

        mcpm_cpu_powered_up();

        ret = cpu_pm_exit();

        if (tdev) {
                clockevents_set_mode(tdev->evtdev, tdev_mode);
                clockevents_program_event(tdev->evtdev,
                                          tdev->evtdev->next_event, 1);
        }

        local_fiq_enable();
        local_irq_enable();

        if (ret)
                pr_err("%s exiting with error %d\n", __func__, ret);
        return ret;
}

struct bL_thread {
        struct task_struct *task;
        wait_queue_head_t wq;
        int wanted_cluster;     /* -1: no switch requested */
};

static struct bL_thread bL_threads[NR_CPUS];

static int bL_switcher_thread(void *arg)
{
        struct bL_thread *t = arg;
        struct sched_param param = { .sched_priority = 1 };
        int cluster;

        sched_setscheduler_nocheck(current, SCHED_FIFO, &param);

        do {
                if (signal_pending(current))
                        flush_signals(current);
                wait_event_interruptible(t->wq,
                                t->wanted_cluster != -1 ||
                                kthread_should_stop());
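                /*
                 * Consume the request atomically: xchg() resets
                 * wanted_cluster, so a request posted while we are busy
                 * switching is not lost but picked up on the next loop
                 * iteration.
                 */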
                cluster = xchg(&t->wanted_cluster, -1);
                if (cluster != -1)
                        bL_switch_to(cluster);
        } while (!kthread_should_stop());

        return 0;
}

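/*
 * Each switcher thread is bound to its CPU with kthread_bind() so that
 * bL_switch_to() always runs on the very CPU being switched, as required
 * by its interface contract.
 */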
static struct task_struct * __init bL_switcher_thread_create(int cpu, void *arg)
{
        struct task_struct *task;

        task = kthread_create_on_node(bL_switcher_thread, arg,
                                      cpu_to_node(cpu), "kswitcher_%d", cpu);
        if (!IS_ERR(task)) {
                kthread_bind(task, cpu);
                wake_up_process(task);
        } else
                pr_err("%s failed for CPU %d\n", __func__, cpu);
        return task;
}

/*
 * bL_switch_request - Switch to a specific cluster for the given CPU
 *
 * @cpu: the CPU to switch
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function causes a cluster switch on the given CPU by waking up
 * the appropriate switcher thread.  This function may or may not return
 * before the switch has occurred.
 */
int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
{
        struct bL_thread *t;

        if (cpu >= ARRAY_SIZE(bL_threads)) {
                pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
                return -EINVAL;
        }

        t = &bL_threads[cpu];
        if (IS_ERR(t->task))
                return PTR_ERR(t->task);
        if (!t->task)
                return -ESRCH;

        t->wanted_cluster = new_cluster_id;
        wake_up(&t->wq);
        return 0;
}
EXPORT_SYMBOL_GPL(bL_switch_request);
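
/*
 * Illustrative use only: ask CPU 0 to move to cluster 1.  The bound
 * switcher thread performs the switch asynchronously, so a zero return
 * only means the request has been queued:
 *
 *      ret = bL_switch_request(0, 1);
 */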

static int __init bL_switcher_init(void)
{
        int cpu;

        pr_info("big.LITTLE switcher initializing\n");

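        /*
         * Only CPUs online at init time get a switcher thread;
         * bL_switch_request() will return -ESRCH for any other CPU.
         */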
        for_each_online_cpu(cpu) {
                struct bL_thread *t = &bL_threads[cpu];
                init_waitqueue_head(&t->wq);
                t->wanted_cluster = -1;
                t->task = bL_switcher_thread_create(cpu, t);
        }

        pr_info("big.LITTLE switcher initialized\n");
        return 0;
}

late_initcall(bL_switcher_init);