blob: 072aa38374ce9a2db26103b4ed83cfa8bcb70434 [file] [log] [blame]
Gregory CLEMENTab8ba012012-11-17 15:22:23 +01001/*
2 * Marvell MVEBU CPU clock handling.
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Gregory CLEMENT <gregory.clement@free-electrons.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12#include <linux/kernel.h>
Stephen Boyddb00c3e2015-06-19 15:00:46 -070013#include <linux/slab.h>
14#include <linux/clk.h>
Gregory CLEMENTab8ba012012-11-17 15:22:23 +010015#include <linux/clk-provider.h>
16#include <linux/of_address.h>
17#include <linux/io.h>
18#include <linux/of.h>
19#include <linux/delay.h>
Thomas Petazzoniee2d8ea2014-07-09 17:45:11 +020020#include <linux/mvebu-pmsu.h>
21#include <asm/smp_plat.h>
Gregory CLEMENTab8ba012012-11-17 15:22:23 +010022
/* Clock-complex (system control) register offsets and fields */
#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET		0x0
#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL		0xff
#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT		8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET		0x8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT	16
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET		0xC
/* Each CPU owns an 8-bit slot in the value register; the divider
 * itself occupies the low 6 bits of the slot. */
#define SYS_CTRL_CLK_DIVIDER_MASK			0x3F

/* CPU/fabric ratio field inside a per-CPU PMU DFS register */
#define PMU_DFS_RATIO_SHIFT	16
#define PMU_DFS_RATIO_MASK	0x3F

/* Upper bound on the number of CPU clocks exposed by the provider */
#define MAX_CPU 4
/*
 * Per-CPU clock instance; one is allocated for every "cpu" node
 * found in the Device Tree.
 */
struct cpu_clk {
	struct clk_hw hw;		/* common clock framework handle */
	int cpu;			/* CPU index (DT "reg" property) */
	const char *clk_name;		/* "cpuN", kmalloc'ed, owned here */
	const char *parent_name;	/* parent clock name from the DT */
	void __iomem *reg_base;		/* mapped clock-complex registers */
	void __iomem *pmu_dfs;		/* this CPU's PMU DFS register, or
					 * NULL when DFS is unavailable */
};
43
/* Registered CPU clocks, indexed by CPU number */
static struct clk **clks;

/* Provider payload handed to of_clk_add_provider() */
static struct clk_onecell_data clk_data;

/* Map a clk_hw back to its enclosing cpu_clk instance */
#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)
49
50static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
51 unsigned long parent_rate)
52{
53 struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
54 u32 reg, div;
55
56 reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
57 div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
58 return parent_rate / div;
59}
60
/*
 * Round a requested rate to the nearest rate the hardware can
 * produce.  Valid ratios are 1:1, 1:2 and 1:3, so the result is
 * always parent_rate / {1, 2, 3}.
 */
static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long *parent_rate)
{
	unsigned long div;

	/*
	 * A zero rate request would otherwise divide by zero; clamp it
	 * to the lowest reachable rate (parent / 3).
	 */
	if (!rate)
		return *parent_rate / 3;

	div = *parent_rate / rate;
	if (div == 0)
		div = 1;
	else if (div > 3)
		div = 3;

	return *parent_rate / div;
}
75
/*
 * Change the CPU frequency while the CPU clock is gated off.  In that
 * state the divider can be programmed directly through the
 * clock-complex registers using the "smooth reload" mechanism.
 */
static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
				unsigned long parent_rate)

{
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
	u32 reg, div;
	u32 reload_mask;

	/*
	 * Insert the new divider into this CPU's 8-bit slot of the
	 * divider value register, preserving the other CPUs' slots.
	 */
	div = parent_rate / rate;
	reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
		& (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
		| (div << (cpuclk->cpu * 8));
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
	/* Set clock divider reload smooth bit mask */
	reload_mask = 1 << (20 + cpuclk->cpu);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | reload_mask;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Now trigger the clock update */
	/* NOTE(review): bit 24 appears to be the global reload trigger
	 * — confirm against the SoC datasheet. */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | 1 << 24;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Wait for clocks to settle down then clear reload request */
	udelay(1000);
	reg &= ~(reload_mask | 1 << 24);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	udelay(1000);

	return 0;
}
109
/*
 * Change the CPU frequency while the CPU is running: program the new
 * CPU/fabric ratio into this CPU's PMU DFS register, then hand the
 * actual transition over to the PMSU.
 */
static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long parent_rate)
{
	u32 reg;
	unsigned long fabric_div, target_div, cur_rate;
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);

	/*
	 * PMU DFS registers are not mapped, the Device Tree does not
	 * describe them. We cannot change the frequency dynamically.
	 */
	if (!cpuclk->pmu_dfs)
		return -ENODEV;

	cur_rate = clk_hw_get_rate(hwclk);

	/* Current fabric (NBCLK) divider */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET);
	fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) &
		SYS_CTRL_CLK_DIVIDER_MASK;

	/* Frequency is going up */
	if (rate == 2 * cur_rate)
		target_div = fabric_div / 2;
	/* Frequency is going down */
	else
		target_div = fabric_div;

	if (target_div == 0)
		target_div = 1;

	/* Program the target ratio into this CPU's PMU DFS register */
	reg = readl(cpuclk->pmu_dfs);
	reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT);
	reg |= (target_div << PMU_DFS_RATIO_SHIFT);
	writel(reg, cpuclk->pmu_dfs);

	/* Assert the divider reset bits for all CPUs before the switch */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	reg |= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL <<
		SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* The PMSU performs the actual frequency transition */
	return mvebu_pmsu_dfs_request(cpuclk->cpu);
}
152
153static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
154 unsigned long parent_rate)
155{
156 if (__clk_is_enabled(hwclk->clk))
157 return clk_cpu_on_set_rate(hwclk, rate, parent_rate);
158 else
159 return clk_cpu_off_set_rate(hwclk, rate, parent_rate);
160}
161
/* Operations for the CPU clocks: rate control only, no gating ops */
static const struct clk_ops cpu_ops = {
	.recalc_rate = clk_cpu_recalc_rate,
	.round_rate = clk_cpu_round_rate,
	.set_rate = clk_cpu_set_rate,
};
167
Sachin Kamat9ac817512013-10-08 16:47:45 +0530168static void __init of_cpu_clk_setup(struct device_node *node)
Gregory CLEMENTab8ba012012-11-17 15:22:23 +0100169{
170 struct cpu_clk *cpuclk;
171 void __iomem *clock_complex_base = of_iomap(node, 0);
Thomas Petazzoniee2d8ea2014-07-09 17:45:11 +0200172 void __iomem *pmu_dfs_base = of_iomap(node, 1);
Gregory CLEMENTab8ba012012-11-17 15:22:23 +0100173 int ncpus = 0;
174 struct device_node *dn;
175
176 if (clock_complex_base == NULL) {
177 pr_err("%s: clock-complex base register not set\n",
178 __func__);
179 return;
180 }
181
Thomas Petazzoniee2d8ea2014-07-09 17:45:11 +0200182 if (pmu_dfs_base == NULL)
183 pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n",
184 __func__);
185
Gregory CLEMENTab8ba012012-11-17 15:22:23 +0100186 for_each_node_by_type(dn, "cpu")
187 ncpus++;
188
Markus Elfring23826e22017-04-19 20:15:21 +0200189 cpuclk = kcalloc(ncpus, sizeof(*cpuclk), GFP_KERNEL);
Gregory CLEMENTab8ba012012-11-17 15:22:23 +0100190 if (WARN_ON(!cpuclk))
Jisheng Zhangf98d0072013-08-23 10:34:01 +0800191 goto cpuclk_out;
Gregory CLEMENTab8ba012012-11-17 15:22:23 +0100192
Markus Elfring23826e22017-04-19 20:15:21 +0200193 clks = kcalloc(ncpus, sizeof(*clks), GFP_KERNEL);
Gregory CLEMENTab8ba012012-11-17 15:22:23 +0100194 if (WARN_ON(!clks))
Cong Dingd6f620a2013-01-15 19:44:26 +0100195 goto clks_out;
Gregory CLEMENTab8ba012012-11-17 15:22:23 +0100196
197 for_each_node_by_type(dn, "cpu") {
198 struct clk_init_data init;
199 struct clk *clk;
Gregory CLEMENTab8ba012012-11-17 15:22:23 +0100200 char *clk_name = kzalloc(5, GFP_KERNEL);
201 int cpu, err;
202
203 if (WARN_ON(!clk_name))
Cong Dingd6f620a2013-01-15 19:44:26 +0100204 goto bail_out;
Gregory CLEMENTab8ba012012-11-17 15:22:23 +0100205
206 err = of_property_read_u32(dn, "reg", &cpu);
207 if (WARN_ON(err))
Cong Dingd6f620a2013-01-15 19:44:26 +0100208 goto bail_out;
Gregory CLEMENTab8ba012012-11-17 15:22:23 +0100209
210 sprintf(clk_name, "cpu%d", cpu);
Gregory CLEMENTab8ba012012-11-17 15:22:23 +0100211
Stephen Boyd61e22ff2015-10-16 16:35:11 -0700212 cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
Gregory CLEMENTab8ba012012-11-17 15:22:23 +0100213 cpuclk[cpu].clk_name = clk_name;
214 cpuclk[cpu].cpu = cpu;
215 cpuclk[cpu].reg_base = clock_complex_base;
Thomas Petazzoniee2d8ea2014-07-09 17:45:11 +0200216 if (pmu_dfs_base)
217 cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu;
Gregory CLEMENTab8ba012012-11-17 15:22:23 +0100218 cpuclk[cpu].hw.init = &init;
219
220 init.name = cpuclk[cpu].clk_name;
221 init.ops = &cpu_ops;
222 init.flags = 0;
223 init.parent_names = &cpuclk[cpu].parent_name;
224 init.num_parents = 1;
225
226 clk = clk_register(NULL, &cpuclk[cpu].hw);
227 if (WARN_ON(IS_ERR(clk)))
228 goto bail_out;
229 clks[cpu] = clk;
230 }
231 clk_data.clk_num = MAX_CPU;
232 clk_data.clks = clks;
233 of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
234
235 return;
236bail_out:
237 kfree(clks);
Cong Dingd6f620a2013-01-15 19:44:26 +0100238 while(ncpus--)
239 kfree(cpuclk[ncpus].clk_name);
240clks_out:
Gregory CLEMENTab8ba012012-11-17 15:22:23 +0100241 kfree(cpuclk);
Jisheng Zhangf98d0072013-08-23 10:34:01 +0800242cpuclk_out:
243 iounmap(clock_complex_base);
Gregory CLEMENTab8ba012012-11-17 15:22:23 +0100244}
245
/* Hook the setup routine to matching DT nodes at early boot */
CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
					 of_cpu_clk_setup);
Chris Packhame120c172017-01-27 16:25:42 +1300248
249static void __init of_mv98dx3236_cpu_clk_setup(struct device_node *node)
250{
251 of_clk_add_provider(node, of_clk_src_simple_get, NULL);
252}
253
254CLK_OF_DECLARE(mv98dx3236_cpu_clock, "marvell,mv98dx3236-cpu-clock",
255 of_mv98dx3236_cpu_clk_setup);