blob: 06537f2b2fb8a18a5d0d7735ca70f60d4a475af5 [file] [log] [blame]
Paul Mundtde9186c2010-10-18 21:32:58 +09001/*
2 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
3 *
4 * Copyright (C) 2010 Magnus Damm
Paul Mundt4d6ddb02012-04-11 12:05:50 +09005 * Copyright (C) 2010 - 2012 Paul Mundt
Paul Mundtde9186c2010-10-18 21:32:58 +09006 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
Magnus Dammfa676ca2010-05-11 13:29:34 +000011#include <linux/clk.h>
12#include <linux/compiler.h>
13#include <linux/slab.h>
14#include <linux/io.h>
15#include <linux/sh_clk.h>
16
Paul Mundt764f4e42012-05-25 16:34:48 +090017#define CPG_CKSTP_BIT BIT(8)
18
Paul Mundt104fa612012-04-12 19:50:40 +090019static unsigned int sh_clk_read(struct clk *clk)
Magnus Dammfa676ca2010-05-11 13:29:34 +000020{
Paul Mundt4d6ddb02012-04-11 12:05:50 +090021 if (clk->flags & CLK_ENABLE_REG_8BIT)
Paul Mundt104fa612012-04-12 19:50:40 +090022 return ioread8(clk->mapped_reg);
Paul Mundt4d6ddb02012-04-11 12:05:50 +090023 else if (clk->flags & CLK_ENABLE_REG_16BIT)
Paul Mundt104fa612012-04-12 19:50:40 +090024 return ioread16(clk->mapped_reg);
Paul Mundt4d6ddb02012-04-11 12:05:50 +090025
Paul Mundt104fa612012-04-12 19:50:40 +090026 return ioread32(clk->mapped_reg);
27}
28
29static void sh_clk_write(int value, struct clk *clk)
30{
31 if (clk->flags & CLK_ENABLE_REG_8BIT)
32 iowrite8(value, clk->mapped_reg);
33 else if (clk->flags & CLK_ENABLE_REG_16BIT)
34 iowrite16(value, clk->mapped_reg);
35 else
36 iowrite32(value, clk->mapped_reg);
37}
38
39static int sh_clk_mstp_enable(struct clk *clk)
40{
41 sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +000042 return 0;
43}
44
Paul Mundt4d6ddb02012-04-11 12:05:50 +090045static void sh_clk_mstp_disable(struct clk *clk)
Magnus Dammfa676ca2010-05-11 13:29:34 +000046{
Paul Mundt104fa612012-04-12 19:50:40 +090047 sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +000048}
49
/* Ops shared by all MSTP gate clocks; the rate simply follows the parent. */
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};
55
Paul Mundt4d6ddb02012-04-11 12:05:50 +090056int __init sh_clk_mstp_register(struct clk *clks, int nr)
Magnus Dammfa676ca2010-05-11 13:29:34 +000057{
58 struct clk *clkp;
59 int ret = 0;
60 int k;
61
62 for (k = 0; !ret && (k < nr); k++) {
63 clkp = clks + k;
Paul Mundt4d6ddb02012-04-11 12:05:50 +090064 clkp->ops = &sh_clk_mstp_clk_ops;
Magnus Dammfa676ca2010-05-11 13:29:34 +000065 ret |= clk_register(clkp);
66 }
67
68 return ret;
69}
70
Paul Mundta60977a2012-05-25 14:59:26 +090071/*
72 * Div/mult table lookup helpers
73 */
74static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
75{
76 return clk->priv;
77}
78
79static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
80{
81 return clk_to_div_table(clk)->div_mult_table;
82}
83
84/*
Paul Mundt75f5f8a2012-05-25 15:26:01 +090085 * Common div ops
86 */
/*
 * Round @rate to the closest rate achievable with the entries in the
 * clock's frequency table.
 */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
91
92static unsigned long sh_clk_div_recalc(struct clk *clk)
93{
94 struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
95 unsigned int idx;
96
97 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
98 table, clk->arch_flags ? &clk->arch_flags : NULL);
99
100 idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;
101
102 return clk->freq_table[idx].frequency;
103}
104
Paul Mundt0fa22162012-05-25 15:52:10 +0900105static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
106{
107 struct clk_div_table *dt = clk_to_div_table(clk);
108 unsigned long value;
109 int idx;
110
111 idx = clk_rate_table_find(clk, clk->freq_table, rate);
112 if (idx < 0)
113 return idx;
114
115 value = sh_clk_read(clk);
116 value &= ~(clk->div_mask << clk->enable_bit);
117 value |= (idx << clk->enable_bit);
118 sh_clk_write(value, clk);
119
120 /* XXX: Should use a post-change notifier */
121 if (dt->kick)
122 dt->kick(clk);
123
124 return 0;
125}
126
Paul Mundt764f4e42012-05-25 16:34:48 +0900127static int sh_clk_div_enable(struct clk *clk)
128{
129 sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
130 return 0;
131}
132
133static void sh_clk_div_disable(struct clk *clk)
134{
135 unsigned int val;
136
137 val = sh_clk_read(clk);
138 val |= CPG_CKSTP_BIT;
139
140 /*
141 * div6 clocks require the divisor field to be non-zero or the
142 * above CKSTP toggle silently fails. Ensure that the divisor
143 * array is reset to its initial state on disable.
144 */
145 if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
146 val |= clk->div_mask;
147
148 sh_clk_write(val, clk);
149}
150
Paul Mundt75f5f8a2012-05-25 15:26:01 +0900151/*
Paul Mundta60977a2012-05-25 14:59:26 +0900152 * div6 support
153 */
/*
 * div6 divide ratios: 64 entries mapping field value n to divisor n + 1
 * (i.e. /1 through /64).
 */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
160
/* Divide-only table (no multipliers) covering all 64 div6 ratios. */
static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
165
/* Shared div table for all div6 clocks; no post-change kick hook. */
static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table = &div6_div_mult_table,
};
169
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000170static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
171{
Paul Mundta60977a2012-05-25 14:59:26 +0900172 struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000173 u32 value;
174 int ret, i;
175
176 if (!clk->parent_table || !clk->parent_num)
177 return -EINVAL;
178
179 /* Search the parent */
180 for (i = 0; i < clk->parent_num; i++)
181 if (clk->parent_table[i] == parent)
182 break;
183
184 if (i == clk->parent_num)
185 return -ENODEV;
186
187 ret = clk_reparent(clk, parent);
188 if (ret < 0)
189 return ret;
190
Paul Mundt104fa612012-04-12 19:50:40 +0900191 value = sh_clk_read(clk) &
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000192 ~(((1 << clk->src_width) - 1) << clk->src_shift);
193
Paul Mundt104fa612012-04-12 19:50:40 +0900194 sh_clk_write(value | (i << clk->src_shift), clk);
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000195
196 /* Rebuild the frequency table */
197 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
Kuninori Morimoto52c10ad2011-04-14 17:13:53 +0900198 table, NULL);
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000199
200 return 0;
201}
202
/* Ops for div6 clocks with a fixed parent. */
static struct sh_clk_ops sh_clk_div6_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};
210
/* Ops for div6 clocks that additionally support parent selection. */
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
219
Kuninori Morimoto56242a12011-11-21 21:33:18 -0800220static int __init sh_clk_init_parent(struct clk *clk)
221{
222 u32 val;
223
224 if (clk->parent)
225 return 0;
226
227 if (!clk->parent_table || !clk->parent_num)
228 return 0;
229
230 if (!clk->src_width) {
231 pr_err("sh_clk_init_parent: cannot select parent clock\n");
232 return -EINVAL;
233 }
234
Paul Mundt104fa612012-04-12 19:50:40 +0900235 val = (sh_clk_read(clk) >> clk->src_shift);
Kuninori Morimoto56242a12011-11-21 21:33:18 -0800236 val &= (1 << clk->src_width) - 1;
237
238 if (val >= clk->parent_num) {
239 pr_err("sh_clk_init_parent: parent table size failed\n");
240 return -EINVAL;
241 }
242
Kuninori Morimoto64dea572012-01-19 01:00:40 -0800243 clk_reparent(clk, clk->parent_table[val]);
Kuninori Morimoto56242a12011-11-21 21:33:18 -0800244 if (!clk->parent) {
245 pr_err("sh_clk_init_parent: unable to set parent");
246 return -EINVAL;
247 }
248
249 return 0;
250}
251
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000252static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
Magnus Damma0ec3602012-02-29 22:16:21 +0900253 struct sh_clk_ops *ops)
Magnus Dammfa676ca2010-05-11 13:29:34 +0000254{
255 struct clk *clkp;
256 void *freq_table;
Paul Mundta60977a2012-05-25 14:59:26 +0900257 struct clk_div_table *table = &sh_clk_div6_table;
258 int nr_divs = table->div_mult_table->nr_divisors;
Magnus Dammfa676ca2010-05-11 13:29:34 +0000259 int freq_table_size = sizeof(struct cpufreq_frequency_table);
260 int ret = 0;
261 int k;
262
263 freq_table_size *= (nr_divs + 1);
264 freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
265 if (!freq_table) {
266 pr_err("sh_clk_div6_register: unable to alloc memory\n");
267 return -ENOMEM;
268 }
269
270 for (k = 0; !ret && (k < nr); k++) {
271 clkp = clks + k;
272
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000273 clkp->ops = ops;
Paul Mundta60977a2012-05-25 14:59:26 +0900274 clkp->priv = table;
Magnus Dammfa676ca2010-05-11 13:29:34 +0000275 clkp->freq_table = freq_table + (k * freq_table_size);
276 clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
Kuninori Morimoto7784f4d2011-12-11 19:02:09 -0800277 ret = clk_register(clkp);
Kuninori Morimoto56242a12011-11-21 21:33:18 -0800278 if (ret < 0)
279 break;
Magnus Dammfa676ca2010-05-11 13:29:34 +0000280
Kuninori Morimoto7784f4d2011-12-11 19:02:09 -0800281 ret = sh_clk_init_parent(clkp);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000282 }
283
284 return ret;
285}
286
/* Register div6 clocks without parent selection support. */
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
}
291
/* Register div6 clocks that support runtime parent selection. */
int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr,
					&sh_clk_div6_reparent_clk_ops);
}
297
Paul Mundta60977a2012-05-25 14:59:26 +0900298/*
299 * div4 support
300 */
/*
 * Switch a div4 clock between its internal and external parent and
 * rebuild the frequency table for the new parent rate.
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	/* NOTE(review): bit 7 appears to be the parent-select bit here --
	 * confirm against the CPG register documentation for the SoC. */
	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}
329
/* Ops for always-on div4 clocks (no enable/disable control). */
static struct sh_clk_ops sh_clk_div4_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};
335
/* Ops for div4 clocks that can additionally be gated via CKSTP. */
static struct sh_clk_ops sh_clk_div4_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};
343
/* Ops for div4 clocks supporting gating and parent selection. */
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
352
353static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
Magnus Damma0ec3602012-02-29 22:16:21 +0900354 struct clk_div4_table *table, struct sh_clk_ops *ops)
Magnus Dammfa676ca2010-05-11 13:29:34 +0000355{
356 struct clk *clkp;
357 void *freq_table;
358 int nr_divs = table->div_mult_table->nr_divisors;
359 int freq_table_size = sizeof(struct cpufreq_frequency_table);
360 int ret = 0;
361 int k;
362
363 freq_table_size *= (nr_divs + 1);
364 freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
365 if (!freq_table) {
366 pr_err("sh_clk_div4_register: unable to alloc memory\n");
367 return -ENOMEM;
368 }
369
370 for (k = 0; !ret && (k < nr); k++) {
371 clkp = clks + k;
372
373 clkp->ops = ops;
Magnus Dammfa676ca2010-05-11 13:29:34 +0000374 clkp->priv = table;
375
376 clkp->freq_table = freq_table + (k * freq_table_size);
377 clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
378
379 ret = clk_register(clkp);
380 }
381
382 return ret;
383}
384
/* Register always-on div4 clocks (no gating, no parent selection). */
int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
}
390
/* Register div4 clocks that can be gated via the CKSTP bit. */
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				       struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_enable_clk_ops);
}
397
/* Register div4 clocks supporting gating and parent selection. */
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
					 struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_reparent_clk_ops);
}