/*
 * arch/arm/mach-vexpress/tc2_pm.c - TC2 power management support
 *
 * Created by: Nicolas Pitre, October 2012
 * Copyright:  (C) 2012-2013 Linaro Limited
 *
 * Some portions of this file were originally written by Achin Gupta
 * Copyright:  (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>

#include <linux/arm-cci.h>

#include "spc.h"

/* SCC conf registers */
#define A15_CONF	0x400	/* A15 cluster configuration (cluster ID in bits [3:0]) */
#define A7_CONF		0x500	/* A7 cluster configuration (cluster ID in bits [3:0]) */
#define SYS_INFO	0x700	/* number of CPUs fitted in each cluster */
#define SPC_BASE	0xb00	/* offset of the SPC registers within the SCC */

/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number, which trips lockdep
 * debugging.
 */
static arch_spinlock_t tc2_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED;

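/*
 * TC2 (the V2P-CA15_A7 CoreTile) pairs a Cortex-A15 cluster with a
 * Cortex-A7 cluster; the larger (A7) cluster holds up to 3 CPUs, hence
 * the bound below.  The number of CPUs actually fitted in each cluster
 * is read from SYS_INFO at init time.
 */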
#define TC2_CLUSTERS			2
#define TC2_MAX_CPUS_PER_CLUSTER	3

static unsigned int tc2_nr_cpus[TC2_CLUSTERS];

/* Keep per-cpu usage count to cope with unordered up/down requests */
static int tc2_pm_use_count[TC2_MAX_CPUS_PER_CLUSTER][TC2_CLUSTERS];

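/* A cluster may be powered down only when none of its CPU slots is in use. */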
#define tc2_cluster_unused(cluster) \
	(!tc2_pm_use_count[0][cluster] && \
	 !tc2_pm_use_count[1][cluster] && \
	 !tc2_pm_use_count[2][cluster])

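/*
 * MCPM .power_up method: record the kernel entry point for the given CPU
 * and tell the SPC to wake it up.
 */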
static int tc2_pm_power_up(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
		return -EINVAL;

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&tc2_pm_lock);

	if (tc2_cluster_unused(cluster))
		ve_spc_powerdown(cluster, false);

	tc2_pm_use_count[cpu][cluster]++;
	if (tc2_pm_use_count[cpu][cluster] == 1) {
		ve_spc_set_resume_addr(cluster, cpu,
				       virt_to_phys(mcpm_entry_point));
		ve_spc_cpu_wakeup_irq(cluster, cpu, true);
	} else if (tc2_pm_use_count[cpu][cluster] != 2) {
		/*
		 * The only possible values are:
		 * 0 = CPU down
		 * 1 = CPU (still) up
		 * 2 = CPU requested to be up before it had a chance
		 *     to actually make itself down.
		 * Any other value is a bug.
		 */
		BUG();
	}

	arch_spin_unlock(&tc2_pm_lock);
	local_irq_enable();

	return 0;
}

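/*
 * MCPM .power_down method: runs on the CPU that is going down.  The last
 * CPU left in the cluster additionally flushes L2 and detaches the cluster
 * from the CCI before entering wfi.
 */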
static void tc2_pm_power_down(void)
{
	unsigned int mpidr, cpu, cluster;
	bool last_man = false, skip_wfi = false;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);

	__mcpm_cpu_going_down(cpu, cluster);

	arch_spin_lock(&tc2_pm_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
	tc2_pm_use_count[cpu][cluster]--;
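	/*
	 * Last reference dropped: re-arm this CPU's wake-up IRQ so it can
	 * be powered up again later, and if the whole cluster is now idle
	 * request a cluster power-down from the SPC as well.
	 */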
	if (tc2_pm_use_count[cpu][cluster] == 0) {
		ve_spc_cpu_wakeup_irq(cluster, cpu, true);
		if (tc2_cluster_unused(cluster)) {
			ve_spc_powerdown(cluster, true);
			ve_spc_global_wakeup_irq(true);
			last_man = true;
		}
	} else if (tc2_pm_use_count[cpu][cluster] == 1) {
		/*
		 * A power_up request went ahead of us.
		 * Even if we do not want to shut this CPU down,
		 * the caller expects a certain state as if the WFI
		 * was aborted.  So let's continue with cache cleaning.
		 */
		skip_wfi = true;
	} else
		BUG();

	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		arch_spin_unlock(&tc2_pm_lock);

		if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
			/*
			 * On the Cortex-A15 we need to disable
			 * L2 prefetching before flushing the cache.
			 */
			asm volatile(
			"mcr	p15, 1, %0, c15, c0, 3 \n\t"
			"isb	\n\t"
			"dsb	"
			: : "r" (0x400) );
		}

		/*
		 * We need to disable and flush the whole (L1 and L2) cache.
		 * Let's do it in the safest possible way i.e. with
		 * no memory access within the following sequence
		 * including the stack.
		 */
		asm volatile(
		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
		"bic	r0, r0, #"__stringify(CR_C)" \n\t"
		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
		"isb	\n\t"
		"bl	v7_flush_dcache_all \n\t"
		"clrex	\n\t"
		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
		"isb	\n\t"
		"dsb	"
		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
		      "r9","r10","r11","lr","memory");

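		/*
		 * The cluster's caches are now clean: take it out of the
		 * system coherency domain by disabling its CCI slave port.
		 */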
		cci_disable_port_by_cpu(mpidr);

		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		/*
		 * If last man then undo any setup done previously.
		 */
		if (last_man) {
			ve_spc_powerdown(cluster, false);
			ve_spc_global_wakeup_irq(false);
		}

		arch_spin_unlock(&tc2_pm_lock);

		/*
		 * We need to disable and flush only the L1 cache.
		 * Let's do it in the safest possible way as above.
		 */
		asm volatile(
		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
		"bic	r0, r0, #"__stringify(CR_C)" \n\t"
		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
		"isb	\n\t"
		"bl	v7_flush_dcache_louis \n\t"
		"clrex	\n\t"
		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
		"isb	\n\t"
		"dsb	"
		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
		      "r9","r10","r11","lr","memory");
	}

	__mcpm_cpu_down(cpu, cluster);

	/* Now we are prepared for power-down, do it: */
	if (!skip_wfi)
		wfi();

	/* Not dead at this point?  Let our caller cope. */
}

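/*
 * MCPM .powered_up method: runs on a CPU once it is back up with the MMU on
 * and coherency restored; undoes the wake-up machinery armed on the way down.
 */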
static void tc2_pm_powered_up(void)
{
	unsigned int mpidr, cpu, cluster;
	unsigned long flags;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);

	local_irq_save(flags);
	arch_spin_lock(&tc2_pm_lock);

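	/*
	 * If the cluster had been left ready for power-down by the last man,
	 * cancel that request and the global wake-up IRQ now that it is
	 * running again.
	 */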
	if (tc2_cluster_unused(cluster)) {
		ve_spc_powerdown(cluster, false);
		ve_spc_global_wakeup_irq(false);
	}

	if (!tc2_pm_use_count[cpu][cluster])
		tc2_pm_use_count[cpu][cluster] = 1;

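	/* The CPU is up: its wake-up IRQ and resume address are no longer needed. */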
	ve_spc_cpu_wakeup_irq(cluster, cpu, false);
	ve_spc_set_resume_addr(cluster, cpu, 0);

	arch_spin_unlock(&tc2_pm_lock);
	local_irq_restore(flags);
}

static const struct mcpm_platform_ops tc2_pm_power_ops = {
	.power_up	= tc2_pm_power_up,
	.power_down	= tc2_pm_power_down,
	.powered_up	= tc2_pm_powered_up,
};

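/*
 * Mark the CPU running this early init code as "up" so that the use counts
 * start out consistent with the actual state of the system.
 */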
static bool __init tc2_pm_usage_count_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
		pr_err("%s: boot CPU is out of bounds!\n", __func__);
		return false;
	}
	tc2_pm_use_count[cpu][cluster] = 1;
	return true;
}

/*
 * Enable cluster-level coherency, in preparation for turning on the MMU.
 *
 * Entered with the affinity level to set up in r0: there is nothing to do
 * for the CPU level, and for the cluster level (1) we simply tail-call the
 * CCI driver to enable coherency for our own port.
 */
static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
{
	asm volatile (" \n"
"	cmp	r0, #1 \n"
"	bxne	lr \n"
"	b	cci_enable_port_for_self ");
}

static int __init tc2_pm_init(void)
{
	int ret;
	void __iomem *scc;
	u32 a15_cluster_id, a7_cluster_id, sys_info;
	struct device_node *np;

	/*
	 * The power management-related features are hidden behind
	 * SCC registers. We need to extract runtime information like
	 * cluster ids and number of CPUs really available in clusters.
	 */
	np = of_find_compatible_node(NULL, NULL,
				     "arm,vexpress-scc,v2p-ca15_a7");
	scc = of_iomap(np, 0);
	if (!scc)
		return -ENODEV;

	a15_cluster_id = readl_relaxed(scc + A15_CONF) & 0xf;
	a7_cluster_id = readl_relaxed(scc + A7_CONF) & 0xf;
	if (a15_cluster_id >= TC2_CLUSTERS || a7_cluster_id >= TC2_CLUSTERS)
		return -EINVAL;

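	/*
	 * SYS_INFO advertises how many CPUs are actually fitted in each
	 * cluster: the A15 count in bits [19:16] and the A7 count in
	 * bits [23:20].
	 */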
	sys_info = readl_relaxed(scc + SYS_INFO);
	tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
	tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;

	/*
	 * A subset of the SCC registers is also used to communicate
	 * with the SPC (power controller). We need to be able to
	 * drive it very early in the boot process to power up
	 * processors, so we initialize the SPC driver here.
	 */
	ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id);
	if (ret)
		return ret;

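	/* Cluster coherency is handled through the CCI: bail out if it is not there. */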
	if (!cci_probed())
		return -ENODEV;

	if (!tc2_pm_usage_count_init())
		return -EINVAL;

	ret = mcpm_platform_register(&tc2_pm_power_ops);
	if (!ret) {
		mcpm_sync_init(tc2_pm_power_up_setup);
		pr_info("TC2 power management initialized\n");
	}
	return ret;
}

early_initcall(tc2_pm_init);