blob: d42a07e334822bfabbda05e3fa8f2dbe0c3a2093 [file] [log] [blame]
/*
 * Copyright (c) 2013 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
15
16#include <linux/delay.h>
17#include <linux/init.h>
18#include <linux/smp.h>
19#include <linux/io.h>
20#include <linux/of.h>
21#include <linux/of_address.h>
Heiko Stuebnerd003b582014-10-15 10:23:00 -070022#include <linux/regmap.h>
23#include <linux/mfd/syscon.h>
Heiko Stuebnera7a2b312013-06-17 22:29:23 +020024
Kever Yang3ee851e2014-10-15 10:23:03 -070025#include <linux/reset.h>
26#include <linux/cpu.h>
Heiko Stuebnera7a2b312013-06-17 22:29:23 +020027#include <asm/cacheflush.h>
Romain Perierf54b91f2014-07-19 13:03:26 +000028#include <asm/cp15.h>
Heiko Stuebnera7a2b312013-06-17 22:29:23 +020029#include <asm/smp_scu.h>
30#include <asm/smp_plat.h>
31#include <asm/mach/map.h>
32
33#include "core.h"
34
35static void __iomem *scu_base_addr;
36static void __iomem *sram_base_addr;
37static int ncores;
38
39#define PMU_PWRDN_CON 0x08
40#define PMU_PWRDN_ST 0x0c
41
42#define PMU_PWRDN_SCU 4
43
Heiko Stuebnerd003b582014-10-15 10:23:00 -070044static struct regmap *pmu;
Heiko Stuebner9def7cc2015-11-04 20:25:16 +080045static int has_pmu = true;
Heiko Stuebnera7a2b312013-06-17 22:29:23 +020046
Heiko Stuebnerd003b582014-10-15 10:23:00 -070047static int pmu_power_domain_is_on(int pd)
Heiko Stuebnera7a2b312013-06-17 22:29:23 +020048{
Heiko Stuebnerd003b582014-10-15 10:23:00 -070049 u32 val;
50 int ret;
51
52 ret = regmap_read(pmu, PMU_PWRDN_ST, &val);
53 if (ret < 0)
54 return ret;
55
56 return !(val & BIT(pd));
Heiko Stuebnera7a2b312013-06-17 22:29:23 +020057}
58
Krzysztof Kozlowskibd76d732015-03-02 00:12:03 +010059static struct reset_control *rockchip_get_core_reset(int cpu)
Kever Yang3ee851e2014-10-15 10:23:03 -070060{
61 struct device *dev = get_cpu_device(cpu);
62 struct device_node *np;
63
64 /* The cpu device is only available after the initial core bringup */
65 if (dev)
66 np = dev->of_node;
67 else
68 np = of_get_cpu_node(cpu, 0);
69
70 return of_reset_control_get(np, NULL);
71}
72
Heiko Stuebnerd003b582014-10-15 10:23:00 -070073static int pmu_set_power_domain(int pd, bool on)
Heiko Stuebnera7a2b312013-06-17 22:29:23 +020074{
Heiko Stuebnerd003b582014-10-15 10:23:00 -070075 u32 val = (on) ? 0 : BIT(pd);
Caesar Wangfe4407c2015-06-09 17:49:57 +080076 struct reset_control *rstc = rockchip_get_core_reset(pd);
Heiko Stuebnerd003b582014-10-15 10:23:00 -070077 int ret;
Heiko Stuebnera7a2b312013-06-17 22:29:23 +020078
Caesar Wangfe4407c2015-06-09 17:49:57 +080079 if (IS_ERR(rstc) && read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
80 pr_err("%s: could not get reset control for core %d\n",
81 __func__, pd);
82 return PTR_ERR(rstc);
83 }
84
Kever Yang3ee851e2014-10-15 10:23:03 -070085 /*
86 * We need to soft reset the cpu when we turn off the cpu power domain,
87 * or else the active processors might be stalled when the individual
88 * processor is powered down.
89 */
Caesar Wangfe4407c2015-06-09 17:49:57 +080090 if (!IS_ERR(rstc) && !on)
91 reset_control_assert(rstc);
Kever Yang3ee851e2014-10-15 10:23:03 -070092
Heiko Stuebner9def7cc2015-11-04 20:25:16 +080093 if (has_pmu) {
94 ret = regmap_update_bits(pmu, PMU_PWRDN_CON, BIT(pd), val);
Heiko Stuebnerd003b582014-10-15 10:23:00 -070095 if (ret < 0) {
Heiko Stuebner9def7cc2015-11-04 20:25:16 +080096 pr_err("%s: could not update power domain\n",
Caesar Wang7f0b61a2015-06-09 17:49:59 +080097 __func__);
Heiko Stuebnerd003b582014-10-15 10:23:00 -070098 return ret;
99 }
Heiko Stuebner9def7cc2015-11-04 20:25:16 +0800100
101 ret = -1;
102 while (ret != on) {
103 ret = pmu_power_domain_is_on(pd);
104 if (ret < 0) {
105 pr_err("%s: could not read power domain state\n",
106 __func__);
107 return ret;
108 }
109 }
Heiko Stuebnerd003b582014-10-15 10:23:00 -0700110 }
111
Caesar Wangfe4407c2015-06-09 17:49:57 +0800112 if (!IS_ERR(rstc)) {
113 if (on)
114 reset_control_deassert(rstc);
115 reset_control_put(rstc);
116 }
117
Heiko Stuebnerd003b582014-10-15 10:23:00 -0700118 return 0;
Heiko Stuebnera7a2b312013-06-17 22:29:23 +0200119}
120
121/*
122 * Handling of CPU cores
123 */
124
/*
 * Bring up secondary core @cpu: power on its domain and, on non-A9
 * SoCs, hand the startup address to the bootrom mailbox in sram.
 * Returns 0 on success or a negative errno.
 */
static int rockchip_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/* both set up by rockchip_smp_prepare_cpus(); pmu only if has_pmu */
	if (!sram_base_addr || (has_pmu && !pmu)) {
		pr_err("%s: sram or pmu missing for cpu boot\n", __func__);
		return -ENXIO;
	}

	/* ncores was read back from the hardware during prepare_cpus */
	if (cpu >= ncores) {
		pr_err("%s: cpu %d outside maximum number of cpus %d\n",
		       __func__, cpu, ncores);
		return -ENXIO;
	}

	/* start the core */
	ret = pmu_set_power_domain(0 + cpu, true);
	if (ret < 0)
		return ret;

	if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
		/*
		 * We communicate with the bootrom to activate the cpus other
		 * than cpu0. After a blob of initialization code they stay
		 * in wfe state; once activated they check the mailbox:
		 *   sram_base_addr + 4: 0xdeadbeaf
		 *   sram_base_addr + 8: start address for pc
		 * cpu0 needs to wait for the other cpus to enter the wfe
		 * state. The wait time is affected by many aspects
		 * (e.g. cpu frequency, bootrom frequency, sram frequency...).
		 */
		mdelay(1); /* give the other cpus time to reach wfe */

		/* order matters: pc first, then the magic that releases them */
		writel(virt_to_phys(secondary_startup), sram_base_addr + 8);
		writel(0xDEADBEAF, sram_base_addr + 4);
		dsb_sev();
	}

	return 0;
}
166
/**
 * rockchip_smp_prepare_sram - populate necessary sram block
 * Starting cores execute the code residing at the start of the on-chip sram
 * after power-on. Therefore make sure, this sram region is reserved and
 * big enough. After this check, copy the trampoline code that directs the
 * core to the real startup code in ram into the sram-region.
 * @node: mmio-sram device node
 *
 * Returns 0 on success, negative errno if the region is missing or too
 * small for the trampoline.
 */
static int __init rockchip_smp_prepare_sram(struct device_node *node)
{
	/* trampoline size, delimited by two symbols from the asm blob */
	unsigned int trampoline_sz = &rockchip_secondary_trampoline_end -
					    &rockchip_secondary_trampoline;
	struct resource res;
	unsigned int rsize;
	int ret;

	ret = of_address_to_resource(node, 0, &res);
	if (ret < 0) {
		pr_err("%s: could not get address for node %s\n",
		       __func__, node->full_name);
		return ret;
	}

	/* the reserved sram region must be able to hold the trampoline */
	rsize = resource_size(&res);
	if (rsize < trampoline_sz) {
		pr_err("%s: reserved block with size 0x%x is to small for trampoline size 0x%x\n",
		       __func__, rsize, trampoline_sz);
		return -EINVAL;
	}

	/* set the boot function for the sram code */
	rockchip_boot_fn = virt_to_phys(secondary_startup);

	/* copy the trampoline to sram, that runs during startup of the core */
	memcpy(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
	/* push the trampoline out to memory so a cold core sees it */
	flush_cache_all();
	outer_clean_range(0, trampoline_sz);

	dsb_sev();

	return 0;
}
209
/* layout of the PMU mmio region for the fallback regmap: 32-bit registers */
static const struct regmap_config rockchip_pmu_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};
215
216static int __init rockchip_smp_prepare_pmu(void)
217{
218 struct device_node *node;
219 void __iomem *pmu_base;
220
Heiko Stuebner6de2d212014-10-15 10:23:01 -0700221 /*
222 * This function is only called via smp_ops->smp_prepare_cpu().
223 * That only happens if a "/cpus" device tree node exists
224 * and has an "enable-method" property that selects the SMP
225 * operations defined herein.
226 */
227 node = of_find_node_by_path("/cpus");
228
229 pmu = syscon_regmap_lookup_by_phandle(node, "rockchip,pmu");
230 of_node_put(node);
231 if (!IS_ERR(pmu))
232 return 0;
233
Heiko Stuebnerd003b582014-10-15 10:23:00 -0700234 pmu = syscon_regmap_lookup_by_compatible("rockchip,rk3066-pmu");
235 if (!IS_ERR(pmu))
236 return 0;
237
238 /* fallback, create our own regmap for the pmu area */
239 pmu = NULL;
240 node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-pmu");
241 if (!node) {
242 pr_err("%s: could not find pmu dt node\n", __func__);
243 return -ENODEV;
244 }
245
246 pmu_base = of_iomap(node, 0);
247 if (!pmu_base) {
248 pr_err("%s: could not map pmu registers\n", __func__);
249 return -ENOMEM;
250 }
251
252 pmu = regmap_init_mmio(NULL, pmu_base, &rockchip_pmu_regmap_config);
253 if (IS_ERR(pmu)) {
254 int ret = PTR_ERR(pmu);
255
256 iounmap(pmu_base);
257 pmu = NULL;
258 pr_err("%s: regmap init failed\n", __func__);
259 return ret;
260 }
261
262 return 0;
263}
264
Heiko Stuebnera7a2b312013-06-17 22:29:23 +0200265static void __init rockchip_smp_prepare_cpus(unsigned int max_cpus)
266{
267 struct device_node *node;
268 unsigned int i;
269
Heiko Stuebnera7a2b312013-06-17 22:29:23 +0200270 node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-smp-sram");
271 if (!node) {
272 pr_err("%s: could not find sram dt node\n", __func__);
273 return;
274 }
275
Kever Yang3ee851e2014-10-15 10:23:03 -0700276 sram_base_addr = of_iomap(node, 0);
277 if (!sram_base_addr) {
278 pr_err("%s: could not map sram registers\n", __func__);
Heiko Stuebnera7a2b312013-06-17 22:29:23 +0200279 return;
Kever Yang3ee851e2014-10-15 10:23:03 -0700280 }
Heiko Stuebnera7a2b312013-06-17 22:29:23 +0200281
Heiko Stuebner9def7cc2015-11-04 20:25:16 +0800282 if (has_pmu && rockchip_smp_prepare_pmu())
Heiko Stuebnera7a2b312013-06-17 22:29:23 +0200283 return;
Heiko Stuebnera7a2b312013-06-17 22:29:23 +0200284
Kever Yang3ee851e2014-10-15 10:23:03 -0700285 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) {
286 if (rockchip_smp_prepare_sram(node))
287 return;
Heiko Stuebnera7a2b312013-06-17 22:29:23 +0200288
Kever Yang3ee851e2014-10-15 10:23:03 -0700289 /* enable the SCU power domain */
290 pmu_set_power_domain(PMU_PWRDN_SCU, true);
Heiko Stuebnera7a2b312013-06-17 22:29:23 +0200291
Kever Yang3ee851e2014-10-15 10:23:03 -0700292 node = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
293 if (!node) {
294 pr_err("%s: missing scu\n", __func__);
295 return;
296 }
297
298 scu_base_addr = of_iomap(node, 0);
299 if (!scu_base_addr) {
300 pr_err("%s: could not map scu registers\n", __func__);
301 return;
302 }
303
304 /*
305 * While the number of cpus is gathered from dt, also get the
306 * number of cores from the scu to verify this value when
307 * booting the cores.
308 */
309 ncores = scu_get_core_count(scu_base_addr);
310 pr_err("%s: ncores %d\n", __func__, ncores);
311
312 scu_enable(scu_base_addr);
313 } else {
314 unsigned int l2ctlr;
315
316 asm ("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
317 ncores = ((l2ctlr >> 24) & 0x3) + 1;
318 }
Heiko Stuebnera7a2b312013-06-17 22:29:23 +0200319
320 /* Make sure that all cores except the first are really off */
321 for (i = 1; i < ncores; i++)
322 pmu_set_power_domain(0 + i, false);
323}
324
/*
 * Prepare cpus on rk3036-type SoCs: identical to the common path, but
 * these chips have no PMU power-domain registers, so skip all PMU access.
 */
static void __init rk3036_smp_prepare_cpus(unsigned int max_cpus)
{
	has_pmu = false;

	rockchip_smp_prepare_cpus(max_cpus);
}
331
Romain Perierf54b91f2014-07-19 13:03:26 +0000332#ifdef CONFIG_HOTPLUG_CPU
/*
 * Called on a surviving cpu to cut power to the dying cpu @cpu.
 * Returns 1 to tell the hotplug core the cpu was killed.
 */
static int rockchip_cpu_kill(unsigned int cpu)
{
	/*
	 * We need a delay here to ensure that the dying CPU can finish
	 * executing v7_coherency_exit() and reach the WFI/WFE state
	 * prior to having the power domain disabled.
	 */
	mdelay(1);

	pmu_set_power_domain(0 + cpu, false);
	return 1;
}
345
/*
 * Runs on the dying cpu itself: flush caches, leave coherency, then
 * idle forever until rockchip_cpu_kill() removes power. Never returns.
 */
static void rockchip_cpu_die(unsigned int cpu)
{
	v7_exit_coherency_flush(louis);
	while (1)
		cpu_do_idle();
}
352#endif
353
/* rk3036-type SoCs: same bringup as rockchip_smp_ops, minus the PMU */
static const struct smp_operations rk3036_smp_ops __initconst = {
	.smp_prepare_cpus	= rk3036_smp_prepare_cpus,
	.smp_boot_secondary	= rockchip_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= rockchip_cpu_kill,
	.cpu_die		= rockchip_cpu_die,
#endif
};
362
/* default ops for PMU-equipped SoCs (rk3066/rk3188/rk3288-class parts) */
static const struct smp_operations rockchip_smp_ops __initconst = {
	.smp_prepare_cpus	= rockchip_smp_prepare_cpus,
	.smp_boot_secondary	= rockchip_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= rockchip_cpu_kill,
	.cpu_die		= rockchip_cpu_die,
#endif
};
Caesar Wang7f0b61a2015-06-09 17:49:59 +0800371
/* bind the smp_operations to the DT /cpus "enable-method" strings */
CPU_METHOD_OF_DECLARE(rk3036_smp, "rockchip,rk3036-smp", &rk3036_smp_ops);
CPU_METHOD_OF_DECLARE(rk3066_smp, "rockchip,rk3066-smp", &rockchip_smp_ops);