/*
 * arch/arm/mach-vexpress/dcscb.c - Dual Cluster System Configuration Block
 *
 * Created by: Nicolas Pitre, May 2012
 * Copyright:  (C) 2012-2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/of_address.h>
#include <linux/vexpress.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>


#define RST_HOLD0	0x0
#define RST_HOLD1	0x4
#define SYS_SWRESET	0x8
#define RST_STAT0	0xc
#define RST_STAT1	0x10
#define EAG_CFG_R	0x20
#define EAG_CFG_W	0x24
#define KFC_CFG_R	0x28
#define KFC_CFG_W	0x2c
#define DCS_CFG_R	0x30

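/*
 * Layout of the RST_HOLDx registers as used below: bits [3:0] and
 * [7:4] carry the two individual reset controls for each of the four
 * CPUs in a cluster, while bit 8 holds the whole cluster in reset.
 */
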
/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() while its inbound counterpart
 * is already live using the same logical CPU number, which trips lockdep
 * debugging.
 */
static arch_spinlock_t dcscb_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void __iomem *dcscb_base;
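/* Use count for each CPU, indexed [cpu][cluster]; see dcscb_power_up(). */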
static int dcscb_use_count[4][2];

static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
{
	unsigned int rst_hold, cpumask = (1 << cpu);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cpu >= 4 || cluster >= 2)
		return -EINVAL;

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&dcscb_lock);

	dcscb_use_count[cpu][cluster]++;
	if (dcscb_use_count[cpu][cluster] == 1) {
		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
		if (rst_hold & (1 << 8)) {
			/* remove cluster reset and add individual CPU's reset */
			rst_hold &= ~(1 << 8);
			rst_hold |= 0xf;
		}
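		/* Now release only this CPU by clearing both of its reset bits. */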
		rst_hold &= ~(cpumask | (cpumask << 4));
		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	} else if (dcscb_use_count[cpu][cluster] != 2) {
		/*
		 * The only possible values are:
		 * 0 = CPU down
		 * 1 = CPU (still) up
		 * 2 = CPU requested to be up before it had a chance
		 *     to actually make itself down.
		 * Any other value is a bug.
		 */
		BUG();
	}

	arch_spin_unlock(&dcscb_lock);
	local_irq_enable();

	return 0;
}

static void dcscb_power_down(void)
{
	unsigned int mpidr, cpu, cluster, rst_hold, cpumask;
	bool last_man = false, skip_wfi = false;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	cpumask = (1 << cpu);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= 4 || cluster >= 2);

	arch_spin_lock(&dcscb_lock);
	dcscb_use_count[cpu][cluster]--;
	if (dcscb_use_count[cpu][cluster] == 0) {
		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
		rst_hold |= cpumask;
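		/*
		 * With this CPU included, check whether every CPU in the
		 * cluster is now held by at least one of its two reset
		 * bits; if so, this CPU is the cluster's last man standing.
		 */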
		if (((rst_hold | (rst_hold >> 4)) & 0xf) == 0xf) {
			rst_hold |= (1 << 8);
			last_man = true;
		}
		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	} else if (dcscb_use_count[cpu][cluster] == 1) {
		/*
		 * A power_up request went ahead of us.
		 * Even if we do not want to shut this CPU down,
		 * the caller expects a certain state as if the WFI
		 * was aborted. So let's continue with cache cleaning.
		 */
		skip_wfi = true;
	} else
		BUG();
	arch_spin_unlock(&dcscb_lock);

	/*
	 * Now let's clean our L1 cache and shut ourselves down.
	 * If we're the last CPU in this cluster then clean L2 too.
	 */

	/*
	 * A15/A7 can hit in the cache with SCTLR.C=0, so we don't need
	 * a preliminary flush here for those CPUs. At least, that's
	 * the theory -- without the extra flush, Linux explodes on
	 * RTSM (to be investigated).
	 */
	flush_cache_louis();
	set_cr(get_cr() & ~CR_C);

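	/*
	 * With the C bit now cleared, this CPU can no longer allocate or
	 * dirty new cache lines, so the flushes below cannot be undone.
	 */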
	if (!last_man) {
		flush_cache_louis();
	} else {
		flush_cache_all();
		outer_flush_all();
	}

	/* Disable local coherency by clearing the ACTLR "SMP" bit: */
	set_auxcr(get_auxcr() & ~(1 << 6));

	/* Now we are prepared for power-down, do it: */
	dsb();
	if (!skip_wfi)
		wfi();

	/* Not dead at this point? Let our caller cope. */
}

static const struct mcpm_platform_ops dcscb_power_ops = {
	.power_up	= dcscb_power_up,
	.power_down	= dcscb_power_down,
};

static void __init dcscb_usage_count_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= 4 || cluster >= 2);
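	/* The CPU we are running on is up by definition; account for it. */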
	dcscb_use_count[cpu][cluster] = 1;
}

static int __init dcscb_init(void)
{
	struct device_node *node;
	int ret;

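	/* Find and map the DCSCB registers described in the device tree. */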
	node = of_find_compatible_node(NULL, NULL, "arm,rtsm,dcscb");
	if (!node)
		return -ENODEV;
	dcscb_base = of_iomap(node, 0);
	if (!dcscb_base)
		return -EADDRNOTAVAIL;

	dcscb_usage_count_init();

	ret = mcpm_platform_register(&dcscb_power_ops);
	if (ret) {
		iounmap(dcscb_base);
		return ret;
	}

	pr_info("VExpress DCSCB support installed\n");

	/*
	 * Future entries into the kernel can now go
	 * through the cluster entry vectors.
	 */
	vexpress_flags_set(virt_to_phys(mcpm_entry_point));

	return 0;
}

early_initcall(dcscb_init);