blob: db7d9ab298b64f7ad949a18918ef302b4d80928a [file] [log] [blame]
Gregory CLEMENT7444dad2012-08-02 11:17:51 +03001/*
2 * Power Management Service Unit(PMSU) support for Armada 370/XP platforms.
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Yehuda Yitschak <yehuday@marvell.com>
7 * Gregory Clement <gregory.clement@free-electrons.com>
8 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
9 *
10 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any
12 * warranty of any kind, whether express or implied.
13 *
14 * The Armada 370 and Armada XP SOCs have a power management service
15 * unit which is responsible for powering down and waking up CPUs and
16 * other SOC units
17 */
18
Thomas Petazzonibd045a12014-04-14 15:50:30 +020019#define pr_fmt(fmt) "mvebu-pmsu: " fmt
20
#include <linux/clk.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/smp.h>
#include <linux/resource.h>
#include <linux/slab.h>
Gregory CLEMENTc3e04ca2014-04-14 17:10:11 +020034#include <asm/cacheflush.h>
35#include <asm/cp15.h>
Gregory CLEMENT7444dad2012-08-02 11:17:51 +030036#include <asm/smp_plat.h>
Gregory CLEMENTc3e04ca2014-04-14 17:10:11 +020037#include <asm/suspend.h>
38#include <asm/tlbflush.h>
Thomas Petazzoni49754ff2014-04-14 15:50:29 +020039#include "common.h"
Thomas Petazzonia509ea82014-07-09 17:45:10 +020040#include "armada-370-xp.h"
Gregory CLEMENT7444dad2012-08-02 11:17:51 +030041
42static void __iomem *pmsu_mp_base;
Gregory CLEMENT7444dad2012-08-02 11:17:51 +030043
Gregory CLEMENT0c3acc72014-04-14 15:50:31 +020044#define PMSU_BASE_OFFSET 0x100
45#define PMSU_REG_SIZE 0x1000
46
Gregory CLEMENTf713c7e2014-04-14 17:10:10 +020047/* PMSU MP registers */
Gregory CLEMENTc3e04ca2014-04-14 17:10:11 +020048#define PMSU_CONTROL_AND_CONFIG(cpu) ((cpu * 0x100) + 0x104)
49#define PMSU_CONTROL_AND_CONFIG_DFS_REQ BIT(18)
50#define PMSU_CONTROL_AND_CONFIG_PWDDN_REQ BIT(16)
51#define PMSU_CONTROL_AND_CONFIG_L2_PWDDN BIT(20)
52
53#define PMSU_CPU_POWER_DOWN_CONTROL(cpu) ((cpu * 0x100) + 0x108)
54
55#define PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP BIT(0)
56
57#define PMSU_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x10c)
58#define PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT BIT(16)
59#define PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT BIT(17)
60#define PMSU_STATUS_AND_MASK_IRQ_WAKEUP BIT(20)
61#define PMSU_STATUS_AND_MASK_FIQ_WAKEUP BIT(21)
62#define PMSU_STATUS_AND_MASK_DBG_WAKEUP BIT(22)
63#define PMSU_STATUS_AND_MASK_IRQ_MASK BIT(24)
64#define PMSU_STATUS_AND_MASK_FIQ_MASK BIT(25)
65
Thomas Petazzonia509ea82014-07-09 17:45:10 +020066#define PMSU_EVENT_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x120)
67#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE BIT(1)
68#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK BIT(17)
69
Gregory CLEMENTf713c7e2014-04-14 17:10:10 +020070#define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu) ((cpu * 0x100) + 0x124)
71
72/* PMSU fabric registers */
73#define L2C_NFABRIC_PM_CTL 0x4
74#define L2C_NFABRIC_PM_CTL_PWR_DOWN BIT(20)
Gregory CLEMENT7444dad2012-08-02 11:17:51 +030075
Gregory CLEMENTc3e04ca2014-04-14 17:10:11 +020076extern void ll_disable_coherency(void);
77extern void ll_enable_coherency(void);
78
/*
 * cpuidle device for the "cpuidle-armada-370-xp" driver. Registered by
 * armada_370_xp_cpu_pm_init(), which stores the suspend callback in
 * dev.platform_data before registration.
 */
static struct platform_device armada_xp_cpuidle_device = {
	.name = "cpuidle-armada-370-xp",
};
82
Gregory CLEMENT7444dad2012-08-02 11:17:51 +030083static struct of_device_id of_pmsu_table[] = {
Gregory CLEMENT0c3acc72014-04-14 15:50:31 +020084 { .compatible = "marvell,armada-370-pmsu", },
85 { .compatible = "marvell,armada-370-xp-pmsu", },
Thomas Petazzonib4bca242014-04-14 15:54:04 +020086 { .compatible = "marvell,armada-380-pmsu", },
Gregory CLEMENT7444dad2012-08-02 11:17:51 +030087 { /* end of list */ },
88};
89
Thomas Petazzoni05ad6902014-04-14 15:53:58 +020090void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
Gregory CLEMENT02e7b062014-04-14 15:50:33 +020091{
92 writel(virt_to_phys(boot_addr), pmsu_mp_base +
93 PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
94}
95
Jisheng Zhangb12634e2013-11-07 17:02:38 +080096static int __init armada_370_xp_pmsu_init(void)
Gregory CLEMENT7444dad2012-08-02 11:17:51 +030097{
98 struct device_node *np;
Thomas Petazzonibd045a12014-04-14 15:50:30 +020099 struct resource res;
100 int ret = 0;
Gregory CLEMENT7444dad2012-08-02 11:17:51 +0300101
102 np = of_find_matching_node(NULL, of_pmsu_table);
Thomas Petazzonibd045a12014-04-14 15:50:30 +0200103 if (!np)
104 return 0;
105
106 pr_info("Initializing Power Management Service Unit\n");
107
108 if (of_address_to_resource(np, 0, &res)) {
109 pr_err("unable to get resource\n");
110 ret = -ENOENT;
111 goto out;
Gregory CLEMENT7444dad2012-08-02 11:17:51 +0300112 }
113
Gregory CLEMENT0c3acc72014-04-14 15:50:31 +0200114 if (of_device_is_compatible(np, "marvell,armada-370-xp-pmsu")) {
115 pr_warn(FW_WARN "deprecated pmsu binding\n");
116 res.start = res.start - PMSU_BASE_OFFSET;
117 res.end = res.start + PMSU_REG_SIZE - 1;
118 }
119
Thomas Petazzonibd045a12014-04-14 15:50:30 +0200120 if (!request_mem_region(res.start, resource_size(&res),
121 np->full_name)) {
122 pr_err("unable to request region\n");
123 ret = -EBUSY;
124 goto out;
125 }
126
127 pmsu_mp_base = ioremap(res.start, resource_size(&res));
128 if (!pmsu_mp_base) {
129 pr_err("unable to map registers\n");
130 release_mem_region(res.start, resource_size(&res));
131 ret = -ENOMEM;
132 goto out;
133 }
134
135 out:
136 of_node_put(np);
137 return ret;
Gregory CLEMENT7444dad2012-08-02 11:17:51 +0300138}
139
Gregory CLEMENTf713c7e2014-04-14 17:10:10 +0200140static void armada_370_xp_pmsu_enable_l2_powerdown_onidle(void)
141{
142 u32 reg;
143
144 if (pmsu_mp_base == NULL)
145 return;
146
147 /* Enable L2 & Fabric powerdown in Deep-Idle mode - Fabric */
148 reg = readl(pmsu_mp_base + L2C_NFABRIC_PM_CTL);
149 reg |= L2C_NFABRIC_PM_CTL_PWR_DOWN;
150 writel(reg, pmsu_mp_base + L2C_NFABRIC_PM_CTL);
151}
152
/*
 * Resume entry point, programmed into the PMSU boot-address redirect
 * register before a CPU powers down (see armada_370_xp_cpu_pm_notify):
 * re-join the SMP group, re-enable coherency, then jump to the generic
 * ARM cpu_resume. Pure assembly tail calls -- control never returns
 * here, so no prologue/epilogue correctness is relied upon.
 */
static void armada_370_xp_cpu_resume(void)
{
	asm volatile("bl ll_add_cpu_to_smp_group\n\t"
		     "bl ll_enable_coherency\n\t"
		     "b  cpu_resume\n\t");
}
159
160/* No locking is needed because we only access per-CPU registers */
161void armada_370_xp_pmsu_idle_prepare(bool deepidle)
162{
163 unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
164 u32 reg;
165
166 if (pmsu_mp_base == NULL)
167 return;
168
169 /*
170 * Adjust the PMSU configuration to wait for WFI signal, enable
171 * IRQ and FIQ as wakeup events, set wait for snoop queue empty
172 * indication and mask IRQ and FIQ from CPU
173 */
174 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
175 reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT |
176 PMSU_STATUS_AND_MASK_IRQ_WAKEUP |
177 PMSU_STATUS_AND_MASK_FIQ_WAKEUP |
178 PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT |
179 PMSU_STATUS_AND_MASK_IRQ_MASK |
180 PMSU_STATUS_AND_MASK_FIQ_MASK;
181 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
182
183 reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
184 /* ask HW to power down the L2 Cache if needed */
185 if (deepidle)
186 reg |= PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
187
188 /* request power down */
189 reg |= PMSU_CONTROL_AND_CONFIG_PWDDN_REQ;
190 writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
191
192 /* Disable snoop disable by HW - SW is taking care of it */
193 reg = readl(pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
194 reg |= PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP;
195 writel(reg, pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
196}
197
/*
 * cpu_suspend() finisher: program the PMSU (deepidle selects L2 power
 * down too), flush and exit coherency, then WFI so the PMSU powers the
 * CPU down. On success this never returns -- the CPU restarts at the
 * address set via mvebu_pmsu_set_cpu_boot_addr(). The code after wfi()
 * only runs if the power down failed, and must undo the coherency exit.
 * noinline: the coherency-exit sequence must not be merged into the
 * caller's stack frame.
 */
static noinline int do_armada_370_xp_cpu_suspend(unsigned long deepidle)
{
	armada_370_xp_pmsu_idle_prepare(deepidle);

	/* Flush caches and leave SMP coherency before powering down */
	v7_exit_coherency_flush(all);

	ll_disable_coherency();

	dsb();

	wfi();

	/* If we are here, wfi failed. As processors run out of
	 * coherency for some time, tlbs might be stale, so flush them
	 */
	local_flush_tlb_all();

	ll_enable_coherency();

	/* Test the CR_C bit and set it if it was cleared */
	asm volatile(
	"mrc	p15, 0, %0, c1, c0, 0 \n\t"
	"tst	%0, #(1 << 2) \n\t"
	"orreq	%0, %0, #(1 << 2) \n\t"
	"mcreq	p15, 0, %0, c1, c0, 0 \n\t"
	"isb	"
	: : "r" (0));

	pr_warn("Failed to suspend the system\n");

	return 0;
}
230
/*
 * cpuidle entry point (passed to the cpuidle driver via platform_data).
 * Runs the power-down sequence through the generic ARM cpu_suspend()
 * machinery so that CPU state is saved/restored around the finisher.
 * deepidle selects L2 + fabric power down in addition to the CPU.
 */
static int armada_370_xp_cpu_suspend(unsigned long deepidle)
{
	return cpu_suspend(deepidle, do_armada_370_xp_cpu_suspend);
}
235
236/* No locking is needed because we only access per-CPU registers */
237static noinline void armada_370_xp_pmsu_idle_restore(void)
238{
239 unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
240 u32 reg;
241
242 if (pmsu_mp_base == NULL)
243 return;
244
245 /* cancel ask HW to power down the L2 Cache if possible */
246 reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
247 reg &= ~PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
248 writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
249
250 /* cancel Enable wakeup events and mask interrupts */
251 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
252 reg &= ~(PMSU_STATUS_AND_MASK_IRQ_WAKEUP | PMSU_STATUS_AND_MASK_FIQ_WAKEUP);
253 reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
254 reg &= ~PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT;
255 reg &= ~(PMSU_STATUS_AND_MASK_IRQ_MASK | PMSU_STATUS_AND_MASK_FIQ_MASK);
256 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
257}
258
Gregory CLEMENTd163ee12014-04-14 17:10:12 +0200259static int armada_370_xp_cpu_pm_notify(struct notifier_block *self,
260 unsigned long action, void *hcpu)
261{
262 if (action == CPU_PM_ENTER) {
263 unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
264 mvebu_pmsu_set_cpu_boot_addr(hw_cpu, armada_370_xp_cpu_resume);
265 } else if (action == CPU_PM_EXIT) {
266 armada_370_xp_pmsu_idle_restore();
267 }
268
269 return NOTIFY_OK;
270}
271
/* Registered with cpu_pm_register_notifier() in armada_370_xp_cpu_pm_init() */
static struct notifier_block armada_370_xp_cpu_pm_notifier = {
	.notifier_call = armada_370_xp_cpu_pm_notify,
};
275
Gregory CLEMENT8c16bab2014-04-14 17:10:14 +0200276int __init armada_370_xp_cpu_pm_init(void)
277{
278 struct device_node *np;
279
280 /*
281 * Check that all the requirements are available to enable
282 * cpuidle. So far, it is only supported on Armada XP, cpuidle
283 * needs the coherency fabric and the PMSU enabled
284 */
285
286 if (!of_machine_is_compatible("marvell,armadaxp"))
287 return 0;
288
289 np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
290 if (!np)
291 return 0;
292 of_node_put(np);
293
294 np = of_find_matching_node(NULL, of_pmsu_table);
295 if (!np)
296 return 0;
297 of_node_put(np);
298
299 armada_370_xp_pmsu_enable_l2_powerdown_onidle();
300 armada_xp_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend;
301 platform_device_register(&armada_xp_cpuidle_device);
302 cpu_pm_register_notifier(&armada_370_xp_cpu_pm_notifier);
303
304 return 0;
305}
306
/* PMSU mapping happens at early_initcall, cpuidle setup later at arch_initcall */
arch_initcall(armada_370_xp_cpu_pm_init);
early_initcall(armada_370_xp_pmsu_init);
Thomas Petazzonia509ea82014-07-09 17:45:10 +0200309
/*
 * Run on the target CPU (via smp_call_function_single) to perform the
 * local half of a dynamic frequency scaling (DFS) transition: arm the
 * PMSU to treat the next WFI as the trigger, enter idle, and clean up
 * once the hardware has completed the transition and woken us.
 *
 * NOTE(review): this indexes the per-CPU PMSU bank with the logical
 * smp_processor_id(), while the other helpers in this file use
 * cpu_logical_map(); presumably logical == hardware id on these SoCs --
 * confirm before relying on it with a non-identity CPU map.
 */
static void mvebu_pmsu_dfs_request_local(void *data)
{
	u32 reg;
	u32 cpu = smp_processor_id();
	unsigned long flags;

	/* IRQs must stay off so nothing runs between arming and wfi() */
	local_irq_save(flags);

	/* Prepare to enter idle */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
	reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT |
	       PMSU_STATUS_AND_MASK_IRQ_MASK     |
	       PMSU_STATUS_AND_MASK_FIQ_MASK;
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));

	/* Request the DFS transition */
	reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));
	reg |= PMSU_CONTROL_AND_CONFIG_DFS_REQ;
	writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));

	/* The fact of entering idle will trigger the DFS transition */
	wfi();

	/*
	 * We're back from idle, the DFS transition has completed,
	 * clear the idle wait indication.
	 */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
	reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));

	local_irq_restore(flags);
}
343
344int mvebu_pmsu_dfs_request(int cpu)
345{
346 unsigned long timeout;
347 int hwcpu = cpu_logical_map(cpu);
348 u32 reg;
349
350 /* Clear any previous DFS DONE event */
351 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
352 reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE;
353 writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
354
355 /* Mask the DFS done interrupt, since we are going to poll */
356 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
357 reg |= PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK;
358 writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
359
360 /* Trigger the DFS on the appropriate CPU */
361 smp_call_function_single(cpu, mvebu_pmsu_dfs_request_local,
362 NULL, false);
363
364 /* Poll until the DFS done event is generated */
365 timeout = jiffies + HZ;
366 while (time_before(jiffies, timeout)) {
367 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
368 if (reg & PMSU_EVENT_STATUS_AND_MASK_DFS_DONE)
369 break;
370 udelay(10);
371 }
372
373 if (time_after(jiffies, timeout))
374 return -ETIME;
375
376 /* Restore the DFS mask to its original state */
377 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
378 reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK;
379 writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
380
381 return 0;
382}
383
384static int __init armada_xp_pmsu_cpufreq_init(void)
385{
386 struct device_node *np;
387 struct resource res;
388 int ret, cpu;
389
390 if (!of_machine_is_compatible("marvell,armadaxp"))
391 return 0;
392
393 /*
394 * In order to have proper cpufreq handling, we need to ensure
395 * that the Device Tree description of the CPU clock includes
396 * the definition of the PMU DFS registers. If not, we do not
397 * register the clock notifier and the cpufreq driver. This
398 * piece of code is only for compatibility with old Device
399 * Trees.
400 */
401 np = of_find_compatible_node(NULL, NULL, "marvell,armada-xp-cpu-clock");
402 if (!np)
403 return 0;
404
405 ret = of_address_to_resource(np, 1, &res);
406 if (ret) {
407 pr_warn(FW_WARN "not enabling cpufreq, deprecated armada-xp-cpu-clock binding\n");
408 of_node_put(np);
409 return 0;
410 }
411
412 of_node_put(np);
413
414 /*
415 * For each CPU, this loop registers the operating points
416 * supported (which are the nominal CPU frequency and half of
417 * it), and registers the clock notifier that will take care
418 * of doing the PMSU part of a frequency transition.
419 */
420 for_each_possible_cpu(cpu) {
421 struct device *cpu_dev;
422 struct clk *clk;
423 int ret;
424
425 cpu_dev = get_cpu_device(cpu);
426 if (!cpu_dev) {
427 pr_err("Cannot get CPU %d\n", cpu);
428 continue;
429 }
430
431 clk = clk_get(cpu_dev, 0);
432 if (!clk) {
433 pr_err("Cannot get clock for CPU %d\n", cpu);
434 return -ENODEV;
435 }
436
437 /*
438 * In case of a failure of dev_pm_opp_add(), we don't
439 * bother with cleaning up the registered OPP (there's
440 * no function to do so), and simply cancel the
441 * registration of the cpufreq device.
442 */
443 ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
444 if (ret) {
445 clk_put(clk);
446 return ret;
447 }
448
449 ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
450 if (ret) {
451 clk_put(clk);
452 return ret;
453 }
454 }
455
456 platform_device_register_simple("cpufreq-generic", -1, NULL, 0);
457 return 0;
458}
459
460device_initcall(armada_xp_pmsu_cpufreq_init);