/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 * Copyright (C) 2010  Magnus Damm
 * Copyright (C) 2010 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>

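/*
 * CPG control registers may be 8, 16 or 32 bits wide, flagged per clock
 * via CLK_ENABLE_REG_{8,16}BIT.  All accesses go through the clock's
 * pre-mapped clk->mapped_reg.
 */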
static unsigned int sh_clk_read(struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		return ioread8(clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		return ioread16(clk->mapped_reg);

	return ioread32(clk->mapped_reg);
}

static void sh_clk_write(int value, struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		iowrite8(value, clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		iowrite16(value, clk->mapped_reg);
	else
		iowrite32(value, clk->mapped_reg);
}

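/*
 * MSTP (module stop) support: each enable_bit in an MSTP register gates
 * one module clock, and the bit is a *stop* bit: enabling the clock
 * clears it, disabling the clock sets it.
 */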
static int sh_clk_mstp_enable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
	return 0;
}

static void sh_clk_mstp_disable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
}

static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};

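/*
 * Illustrative use from platform setup code (the clock indices and
 * register names below are hypothetical):
 *
 *	static struct clk mstp_clks[] = {
 *		[MSTP007] = SH_CLK_MSTP32(&div4_clks[DIV4_P], SMSTPCR0, 7, 0),
 *	};
 *
 *	sh_clk_mstp_register(mstp_clks, ARRAY_SIZE(mstp_clks));
 */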
int __init sh_clk_mstp_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	int ret = 0;
	int k;

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;
		clkp->ops = &sh_clk_mstp_clk_ops;
		ret |= clk_register(clkp);
	}

	return ret;
}

/*
 * Div/mult table lookup helpers
 */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	return clk->priv;
}

static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
	return clk_to_div_table(clk)->div_mult_table;
}

/*
 * Common div ops
 */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}

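/*
 * The current rate is recovered by rebuilding the cached frequency table
 * and indexing it with the divider field read back from the hardware
 * (clk->div_mask bits starting at clk->enable_bit).
 */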
static unsigned long sh_clk_div_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, clk->arch_flags ? &clk->arch_flags : NULL);

	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;

	return clk->freq_table[idx].frequency;
}

/*
 * div6 support
 */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table = &div6_div_mult_table,
};

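/*
 * div6 clocks with multiple parents encode the active parent in a
 * src_width-bit field at src_shift of the control register; the
 * frequency table must be rebuilt whenever the parent changes.
 */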
static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* Search the parent */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	value = sh_clk_read(clk) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	sh_clk_write(value | (i << clk->src_shift), clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	return 0;
}

static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long value;
	int idx;

	idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = sh_clk_read(clk);
	value &= ~clk->div_mask;
	value |= idx;
	sh_clk_write(value, clk);
	return 0;
}

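/*
 * div6 clocks are gated through a stop bit (bit 8).  Enabling reprograms
 * the cached rate first so that the divider field holds a valid value
 * before the clock starts; disabling sets the stop bit and writes all
 * ones to the divider field, since the VDIV bits must stay non-zero
 * while the clock is stopped.
 */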
static int sh_clk_div6_enable(struct clk *clk)
{
	unsigned long value;
	int ret;

	ret = sh_clk_div6_set_rate(clk, clk->rate);
	if (ret == 0) {
		value = sh_clk_read(clk);
		value &= ~0x100; /* clear stop bit to enable clock */
		sh_clk_write(value, clk);
	}
	return ret;
}

static void sh_clk_div6_disable(struct clk *clk)
{
	unsigned long value;

	value = sh_clk_read(clk);
	value |= 0x100; /* stop clock */
	value |= clk->div_mask; /* VDIV bits must be non-zero, overwrite divider */
	sh_clk_write(value, clk);
}

static struct sh_clk_ops sh_clk_div6_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
};

static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
	.set_parent	= sh_clk_div6_set_parent,
};

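/*
 * If no software parent has been set, derive it from the hardware: read
 * the parent selector field back from the control register and reparent
 * the clock to the matching clk->parent_table entry.
 */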
static int __init sh_clk_init_parent(struct clk *clk)
{
	u32 val;

	if (clk->parent)
		return 0;

	if (!clk->parent_table || !clk->parent_num)
		return 0;

	if (!clk->src_width) {
		pr_err("sh_clk_init_parent: cannot select parent clock\n");
		return -EINVAL;
	}

	val  = (sh_clk_read(clk) >> clk->src_shift);
	val &= (1 << clk->src_width) - 1;

	if (val >= clk->parent_num) {
		pr_err("sh_clk_init_parent: parent selector out of range\n");
		return -EINVAL;
	}

	clk_reparent(clk, clk->parent_table[val]);
	if (!clk->parent) {
		pr_err("sh_clk_init_parent: unable to set parent\n");
		return -EINVAL;
	}

	return 0;
}

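/*
 * A single allocation provides each registered clock with its own
 * cpufreq frequency table of nr_divisors + 1 entries, the last entry
 * being the CPUFREQ_TABLE_END terminator.
 */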
static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
					   struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	struct clk_div_table *table = &sh_clk_div6_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div6_register: unable to alloc memory\n");
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;
		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret < 0)
			break;

		ret = sh_clk_init_parent(clkp);
	}

	return ret;
}

int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
}

int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr,
					&sh_clk_div6_reparent_clk_ops);
}

/*
 * div4 support
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}

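/*
 * div4 clocks program the divider index into the div_mask-wide field at
 * clk->enable_bit.  Platforms that need a trigger to make a new divider
 * take effect supply it through the optional table->kick() callback.
 */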
static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk_div_table *dt = clk_to_div_table(clk);
	unsigned long value;
	int idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = sh_clk_read(clk);
	value &= ~(clk->div_mask << clk->enable_bit);
	value |= (idx << clk->enable_bit);
	sh_clk_write(value, clk);

	/* XXX: Should use a post-change notifier */
	if (dt->kick)
		dt->kick(clk);

	return 0;
}

static int sh_clk_div4_enable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) & ~(1 << 8), clk);
	return 0;
}

static void sh_clk_div4_disable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) | (1 << 8), clk);
}

static struct sh_clk_ops sh_clk_div4_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};

static struct sh_clk_ops sh_clk_div4_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
};

static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
	.set_parent	= sh_clk_div4_set_parent,
};

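/*
 * Registration against a platform-supplied clk_div4_table; as with div6,
 * each clock receives a private frequency table slice terminated by
 * CPUFREQ_TABLE_END.
 */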
static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
			struct clk_div4_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div4_register: unable to alloc memory\n");
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
	}

	return ret;
}

int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
}

int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				       struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_enable_clk_ops);
}

int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
					 struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_reparent_clk_ops);
}