/*
 * arch/arm/mach-vexpress/dcscb.c - Dual Cluster System Configuration Block
 *
 * Created by:	Nicolas Pitre, May 2012
 * Copyright:	(C) 2012-2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/of_address.h>
#include <linux/vexpress.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>


#define RST_HOLD0	0x0
#define RST_HOLD1	0x4
#define SYS_SWRESET	0x8
#define RST_STAT0	0xc
#define RST_STAT1	0x10
#define EAG_CFG_R	0x20
#define EAG_CFG_W	0x24
#define KFC_CFG_R	0x28
#define KFC_CFG_W	0x2c
#define DCS_CFG_R	0x30

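/*
 * Layout assumed for the per-cluster RST_HOLD registers by the code below:
 * bits [3:0] and [7:4] are two per-CPU reset fields, and bit 8 holds the
 * whole cluster in reset.  See the RTSM VE DCSCB documentation for the
 * authoritative bit assignments.
 */
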
/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() while its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t dcscb_lock = __ARCH_SPIN_LOCK_UNLOCKED;

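/*
 * dcscb_use_count[cpu][cluster] counts outstanding power_up requests per
 * CPU: 0 = down, 1 = up, 2 = a power_up raced ahead of a not yet completed
 * power_down (see the sanity checks in the handlers below).  The CPU that
 * runs dcscb_init() is seeded to 1 by dcscb_usage_count_init().
 */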
static void __iomem *dcscb_base;
static int dcscb_use_count[4][2];
static int dcscb_allcpus_mask[2];

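/*
 * MCPM power_up method: called (with IRQs enabled) to bring a CPU online;
 * returns 0 on success or a negative errno.
 */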
static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
{
	unsigned int rst_hold, cpumask = (1 << cpu);
	unsigned int all_mask = dcscb_allcpus_mask[cluster];

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cpu >= 4 || cluster >= 2)
		return -EINVAL;

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&dcscb_lock);

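	/*
	 * First power_up request for this CPU: if the whole cluster is still
	 * held in reset, trade the cluster-wide reset for individual CPU
	 * resets, then release this CPU's own reset bits.
	 */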
	dcscb_use_count[cpu][cluster]++;
	if (dcscb_use_count[cpu][cluster] == 1) {
		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
		if (rst_hold & (1 << 8)) {
			/* remove cluster reset and add individual CPU's reset */
			rst_hold &= ~(1 << 8);
			rst_hold |= all_mask;
		}
		rst_hold &= ~(cpumask | (cpumask << 4));
		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	} else if (dcscb_use_count[cpu][cluster] != 2) {
		/*
		 * The only possible values are:
		 * 0 = CPU down
		 * 1 = CPU (still) up
		 * 2 = CPU requested to be up before it had a chance
		 *     to actually make itself down.
		 * Any other value is a bug.
		 */
		BUG();
	}

	arch_spin_unlock(&dcscb_lock);
	local_irq_enable();

	return 0;
}

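/*
 * MCPM power_down method, run on the CPU that is going down: assert this
 * CPU's reset bits (plus the cluster reset if it is the last CPU up in its
 * cluster), clean the caches, leave coherency and wait in WFI.
 */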
static void dcscb_power_down(void)
{
	unsigned int mpidr, cpu, cluster, rst_hold, cpumask, all_mask;
	bool last_man = false, skip_wfi = false;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	cpumask = (1 << cpu);
	all_mask = dcscb_allcpus_mask[cluster];

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= 4 || cluster >= 2);

	arch_spin_lock(&dcscb_lock);
	dcscb_use_count[cpu][cluster]--;
	if (dcscb_use_count[cpu][cluster] == 0) {
		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
		rst_hold |= cpumask;
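		/*
		 * With our own reset now asserted, check whether every CPU
		 * in the cluster is held in at least one of the two per-CPU
		 * reset fields; if so we are the last man standing and the
		 * whole cluster can be put into reset as well.
		 */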
		if (((rst_hold | (rst_hold >> 4)) & all_mask) == all_mask) {
			rst_hold |= (1 << 8);
			last_man = true;
		}
		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	} else if (dcscb_use_count[cpu][cluster] == 1) {
		/*
		 * A power_up request went ahead of us.
		 * Even if we do not want to shut this CPU down,
		 * the caller expects a certain state as if the WFI
		 * was aborted. So let's continue with cache cleaning.
		 */
		skip_wfi = true;
	} else
		BUG();
	arch_spin_unlock(&dcscb_lock);

	/*
	 * Now let's clean our L1 cache and shut ourself down.
	 * If we're the last CPU in this cluster then clean L2 too.
	 */

	/*
	 * A15/A7 can hit in the cache with SCTLR.C=0, so we don't need
	 * a preliminary flush here for those CPUs. At least, that's
	 * the theory -- without the extra flush, Linux explodes on
	 * RTSM (to be investigated).
	 */
	flush_cache_louis();
	set_cr(get_cr() & ~CR_C);

	if (!last_man) {
		flush_cache_louis();
	} else {
		flush_cache_all();
		outer_flush_all();
	}

	/* Disable local coherency by clearing the ACTLR "SMP" bit: */
	set_auxcr(get_auxcr() & ~(1 << 6));

	/* Now we are prepared for power-down, do it: */
	dsb();
	if (!skip_wfi)
		wfi();

	/* Not dead at this point? Let our caller cope. */
}

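/* Power methods exposed to the MCPM layer: */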
static const struct mcpm_platform_ops dcscb_power_ops = {
	.power_up	= dcscb_power_up,
	.power_down	= dcscb_power_down,
};

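/*
 * Mark the CPU running the early init (the boot CPU) as already up, so
 * that its use count is balanced when it is eventually powered down.
 */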
static void __init dcscb_usage_count_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= 4 || cluster >= 2);
	dcscb_use_count[cpu][cluster] = 1;
}

static int __init dcscb_init(void)
{
	struct device_node *node;
	unsigned int cfg;
	int ret;

	node = of_find_compatible_node(NULL, NULL, "arm,rtsm,dcscb");
	if (!node)
		return -ENODEV;
	dcscb_base = of_iomap(node, 0);
	if (!dcscb_base)
		return -EADDRNOTAVAIL;
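	/*
	 * DCS_CFG_R is assumed to report the number of CPUs in each cluster
	 * as one nibble per cluster starting at bit 16; turn each count into
	 * an "all CPUs in this cluster" bitmask.
	 */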
	cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
	dcscb_allcpus_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
	dcscb_allcpus_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;
	dcscb_usage_count_init();

	ret = mcpm_platform_register(&dcscb_power_ops);
	if (ret) {
		iounmap(dcscb_base);
		return ret;
	}

	pr_info("VExpress DCSCB support installed\n");

	/*
	 * Future entries into the kernel can now go
	 * through the cluster entry vectors.
	 */
	vexpress_flags_set(virt_to_phys(mcpm_entry_point));

	return 0;
}

early_initcall(dcscb_init);