blob: 9dea329077958a516108882ca9d75f0540ca4cad [file] [log] [blame]
Paul Mundtde9186c2010-10-18 21:32:58 +09001/*
2 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
3 *
4 * Copyright (C) 2010 Magnus Damm
Paul Mundt4d6ddb02012-04-11 12:05:50 +09005 * Copyright (C) 2010 - 2012 Paul Mundt
Paul Mundtde9186c2010-10-18 21:32:58 +09006 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
Magnus Dammfa676ca2010-05-11 13:29:34 +000011#include <linux/clk.h>
12#include <linux/compiler.h>
13#include <linux/slab.h>
14#include <linux/io.h>
15#include <linux/sh_clk.h>
16
Paul Mundt104fa612012-04-12 19:50:40 +090017static unsigned int sh_clk_read(struct clk *clk)
Magnus Dammfa676ca2010-05-11 13:29:34 +000018{
Paul Mundt4d6ddb02012-04-11 12:05:50 +090019 if (clk->flags & CLK_ENABLE_REG_8BIT)
Paul Mundt104fa612012-04-12 19:50:40 +090020 return ioread8(clk->mapped_reg);
Paul Mundt4d6ddb02012-04-11 12:05:50 +090021 else if (clk->flags & CLK_ENABLE_REG_16BIT)
Paul Mundt104fa612012-04-12 19:50:40 +090022 return ioread16(clk->mapped_reg);
Paul Mundt4d6ddb02012-04-11 12:05:50 +090023
Paul Mundt104fa612012-04-12 19:50:40 +090024 return ioread32(clk->mapped_reg);
25}
26
27static void sh_clk_write(int value, struct clk *clk)
28{
29 if (clk->flags & CLK_ENABLE_REG_8BIT)
30 iowrite8(value, clk->mapped_reg);
31 else if (clk->flags & CLK_ENABLE_REG_16BIT)
32 iowrite16(value, clk->mapped_reg);
33 else
34 iowrite32(value, clk->mapped_reg);
35}
36
37static int sh_clk_mstp_enable(struct clk *clk)
38{
39 sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +000040 return 0;
41}
42
Paul Mundt4d6ddb02012-04-11 12:05:50 +090043static void sh_clk_mstp_disable(struct clk *clk)
Magnus Dammfa676ca2010-05-11 13:29:34 +000044{
Paul Mundt104fa612012-04-12 19:50:40 +090045 sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +000046}
47
/*
 * Clock operations for MSTP (module stop) gate clocks; the rate simply
 * follows the parent.
 */
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};
53
Paul Mundt4d6ddb02012-04-11 12:05:50 +090054int __init sh_clk_mstp_register(struct clk *clks, int nr)
Magnus Dammfa676ca2010-05-11 13:29:34 +000055{
56 struct clk *clkp;
57 int ret = 0;
58 int k;
59
60 for (k = 0; !ret && (k < nr); k++) {
61 clkp = clks + k;
Paul Mundt4d6ddb02012-04-11 12:05:50 +090062 clkp->ops = &sh_clk_mstp_clk_ops;
Magnus Dammfa676ca2010-05-11 13:29:34 +000063 ret |= clk_register(clkp);
64 }
65
66 return ret;
67}
68
/*
 * Round @rate to the closest entry in the clock's pre-built divider
 * frequency table.  Shared by the div4 and div6 op tables.
 */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
73
Paul Mundta60977a2012-05-25 14:59:26 +090074/*
75 * Div/mult table lookup helpers
76 */
/* Divider clocks stash their struct clk_div_table in clk->priv. */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	return clk->priv;
}
81
/* Shorthand for the div/mult table hanging off the clock's div table. */
static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
	return clk_to_div_table(clk)->div_mult_table;
}
86
87/*
88 * div6 support
89 */
/*
 * div6 divisors: the 6-bit register field value N selects a divide
 * ratio of N + 1, i.e. 1 through 64.
 */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
96
/* Divisor table shared by all div6 clocks (no multiplier entries). */
static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
101
/* Common div table handed to every div6 clock via clk->priv. */
static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table = &div6_div_mult_table,
};
105
Magnus Dammfa676ca2010-05-11 13:29:34 +0000106static unsigned long sh_clk_div6_recalc(struct clk *clk)
107{
Paul Mundta60977a2012-05-25 14:59:26 +0900108 struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000109 unsigned int idx;
110
111 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
112 table, NULL);
113
Paul Mundt104fa612012-04-12 19:50:40 +0900114 idx = sh_clk_read(clk) & 0x003f;
Magnus Dammfa676ca2010-05-11 13:29:34 +0000115
116 return clk->freq_table[idx].frequency;
117}
118
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000119static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
120{
Paul Mundta60977a2012-05-25 14:59:26 +0900121 struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000122 u32 value;
123 int ret, i;
124
125 if (!clk->parent_table || !clk->parent_num)
126 return -EINVAL;
127
128 /* Search the parent */
129 for (i = 0; i < clk->parent_num; i++)
130 if (clk->parent_table[i] == parent)
131 break;
132
133 if (i == clk->parent_num)
134 return -ENODEV;
135
136 ret = clk_reparent(clk, parent);
137 if (ret < 0)
138 return ret;
139
Paul Mundt104fa612012-04-12 19:50:40 +0900140 value = sh_clk_read(clk) &
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000141 ~(((1 << clk->src_width) - 1) << clk->src_shift);
142
Paul Mundt104fa612012-04-12 19:50:40 +0900143 sh_clk_write(value | (i << clk->src_shift), clk);
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000144
145 /* Rebuild the frequency table */
146 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
Kuninori Morimoto52c10ad2011-04-14 17:13:53 +0900147 table, NULL);
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000148
149 return 0;
150}
151
Paul Mundt35a96c72010-11-15 18:18:32 +0900152static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
Magnus Dammfa676ca2010-05-11 13:29:34 +0000153{
154 unsigned long value;
155 int idx;
156
157 idx = clk_rate_table_find(clk, clk->freq_table, rate);
158 if (idx < 0)
159 return idx;
160
Paul Mundt104fa612012-04-12 19:50:40 +0900161 value = sh_clk_read(clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000162 value &= ~0x3f;
163 value |= idx;
Paul Mundt104fa612012-04-12 19:50:40 +0900164 sh_clk_write(value, clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000165 return 0;
166}
167
168static int sh_clk_div6_enable(struct clk *clk)
169{
170 unsigned long value;
171 int ret;
172
Paul Mundtf278ea82010-11-19 16:40:35 +0900173 ret = sh_clk_div6_set_rate(clk, clk->rate);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000174 if (ret == 0) {
Paul Mundt104fa612012-04-12 19:50:40 +0900175 value = sh_clk_read(clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000176 value &= ~0x100; /* clear stop bit to enable clock */
Paul Mundt104fa612012-04-12 19:50:40 +0900177 sh_clk_write(value, clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000178 }
179 return ret;
180}
181
/* Disable a div6 clock by setting the stop bit. */
static void sh_clk_div6_disable(struct clk *clk)
{
	unsigned long value = sh_clk_read(clk);

	value |= 0x100; /* stop clock */
	value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
	sh_clk_write(value, clk);
}
191
/* div6 clock operations for clocks with a fixed parent. */
static struct sh_clk_ops sh_clk_div6_clk_ops = {
	.recalc		= sh_clk_div6_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
};
199
/* div6 clock operations for clocks that support parent switching. */
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div6_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
208
Kuninori Morimoto56242a12011-11-21 21:33:18 -0800209static int __init sh_clk_init_parent(struct clk *clk)
210{
211 u32 val;
212
213 if (clk->parent)
214 return 0;
215
216 if (!clk->parent_table || !clk->parent_num)
217 return 0;
218
219 if (!clk->src_width) {
220 pr_err("sh_clk_init_parent: cannot select parent clock\n");
221 return -EINVAL;
222 }
223
Paul Mundt104fa612012-04-12 19:50:40 +0900224 val = (sh_clk_read(clk) >> clk->src_shift);
Kuninori Morimoto56242a12011-11-21 21:33:18 -0800225 val &= (1 << clk->src_width) - 1;
226
227 if (val >= clk->parent_num) {
228 pr_err("sh_clk_init_parent: parent table size failed\n");
229 return -EINVAL;
230 }
231
Kuninori Morimoto64dea572012-01-19 01:00:40 -0800232 clk_reparent(clk, clk->parent_table[val]);
Kuninori Morimoto56242a12011-11-21 21:33:18 -0800233 if (!clk->parent) {
234 pr_err("sh_clk_init_parent: unable to set parent");
235 return -EINVAL;
236 }
237
238 return 0;
239}
240
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000241static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
Magnus Damma0ec3602012-02-29 22:16:21 +0900242 struct sh_clk_ops *ops)
Magnus Dammfa676ca2010-05-11 13:29:34 +0000243{
244 struct clk *clkp;
245 void *freq_table;
Paul Mundta60977a2012-05-25 14:59:26 +0900246 struct clk_div_table *table = &sh_clk_div6_table;
247 int nr_divs = table->div_mult_table->nr_divisors;
Magnus Dammfa676ca2010-05-11 13:29:34 +0000248 int freq_table_size = sizeof(struct cpufreq_frequency_table);
249 int ret = 0;
250 int k;
251
252 freq_table_size *= (nr_divs + 1);
253 freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
254 if (!freq_table) {
255 pr_err("sh_clk_div6_register: unable to alloc memory\n");
256 return -ENOMEM;
257 }
258
259 for (k = 0; !ret && (k < nr); k++) {
260 clkp = clks + k;
261
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000262 clkp->ops = ops;
Paul Mundta60977a2012-05-25 14:59:26 +0900263 clkp->priv = table;
Magnus Dammfa676ca2010-05-11 13:29:34 +0000264 clkp->freq_table = freq_table + (k * freq_table_size);
265 clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
Kuninori Morimoto7784f4d2011-12-11 19:02:09 -0800266 ret = clk_register(clkp);
Kuninori Morimoto56242a12011-11-21 21:33:18 -0800267 if (ret < 0)
268 break;
Magnus Dammfa676ca2010-05-11 13:29:34 +0000269
Kuninori Morimoto7784f4d2011-12-11 19:02:09 -0800270 ret = sh_clk_init_parent(clkp);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000271 }
272
273 return ret;
274}
275
/* Register an array of div6 clocks without reparenting support. */
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
}
280
/* Register an array of div6 clocks that support parent switching. */
int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr,
					&sh_clk_div6_reparent_clk_ops);
}
286
Paul Mundta60977a2012-05-25 14:59:26 +0900287/*
288 * div4 support
289 */
Magnus Dammfa676ca2010-05-11 13:29:34 +0000290static unsigned long sh_clk_div4_recalc(struct clk *clk)
291{
Paul Mundta60977a2012-05-25 14:59:26 +0900292 struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000293 unsigned int idx;
294
295 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
296 table, &clk->arch_flags);
297
Paul Mundt104fa612012-04-12 19:50:40 +0900298 idx = (sh_clk_read(clk) >> clk->enable_bit) & 0x000f;
Magnus Dammfa676ca2010-05-11 13:29:34 +0000299
300 return clk->freq_table[idx].frequency;
301}
302
/*
 * Switch a div4 clock between its internal and external parent by
 * toggling bit 7 of the control register, then rebuild the frequency
 * table for the new parent rate.
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}
331
Paul Mundt35a96c72010-11-15 18:18:32 +0900332static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
Magnus Dammfa676ca2010-05-11 13:29:34 +0000333{
Paul Mundta60977a2012-05-25 14:59:26 +0900334 struct clk_div_table *dt = clk_to_div_table(clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000335 unsigned long value;
336 int idx = clk_rate_table_find(clk, clk->freq_table, rate);
337 if (idx < 0)
338 return idx;
339
Paul Mundt104fa612012-04-12 19:50:40 +0900340 value = sh_clk_read(clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000341 value &= ~(0xf << clk->enable_bit);
342 value |= (idx << clk->enable_bit);
Paul Mundt104fa612012-04-12 19:50:40 +0900343 sh_clk_write(value, clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000344
Paul Mundta60977a2012-05-25 14:59:26 +0900345 /* XXX: Should use a post-change notifier */
346 if (dt->kick)
347 dt->kick(clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000348
349 return 0;
350}
351
/* Enable a div4 clock by clearing its stop bit (bit 8). */
static int sh_clk_div4_enable(struct clk *clk)
{
	unsigned int value = sh_clk_read(clk);

	sh_clk_write(value & ~(1 << 8), clk);
	return 0;
}
357
/* Disable a div4 clock by setting its stop bit (bit 8). */
static void sh_clk_div4_disable(struct clk *clk)
{
	unsigned int value = sh_clk_read(clk);

	sh_clk_write(value | (1 << 8), clk);
}
362
/* div4 clock operations for always-running clocks (no gate). */
static struct sh_clk_ops sh_clk_div4_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};
368
/* div4 clock operations for gateable clocks (stop bit supported). */
static struct sh_clk_ops sh_clk_div4_enable_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
};
376
/* div4 clock operations for gateable clocks with parent switching. */
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
385
386static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
Magnus Damma0ec3602012-02-29 22:16:21 +0900387 struct clk_div4_table *table, struct sh_clk_ops *ops)
Magnus Dammfa676ca2010-05-11 13:29:34 +0000388{
389 struct clk *clkp;
390 void *freq_table;
391 int nr_divs = table->div_mult_table->nr_divisors;
392 int freq_table_size = sizeof(struct cpufreq_frequency_table);
393 int ret = 0;
394 int k;
395
396 freq_table_size *= (nr_divs + 1);
397 freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
398 if (!freq_table) {
399 pr_err("sh_clk_div4_register: unable to alloc memory\n");
400 return -ENOMEM;
401 }
402
403 for (k = 0; !ret && (k < nr); k++) {
404 clkp = clks + k;
405
406 clkp->ops = ops;
Magnus Dammfa676ca2010-05-11 13:29:34 +0000407 clkp->priv = table;
408
409 clkp->freq_table = freq_table + (k * freq_table_size);
410 clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
411
412 ret = clk_register(clkp);
413 }
414
415 return ret;
416}
417
/* Register an array of always-running div4 clocks (no gate). */
int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
}
423
/* Register an array of gateable div4 clocks. */
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				       struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_enable_clk_ops);
}
430
/* Register an array of gateable div4 clocks with parent switching. */
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
					 struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_reparent_clk_ops);
}