/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 * Copyright (C) 2010  Magnus Damm
 * Copyright (C) 2010 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>

#define CPG_CKSTP_BIT	BIT(8)

static unsigned int sh_clk_read(struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		return ioread8(clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		return ioread16(clk->mapped_reg);

	return ioread32(clk->mapped_reg);
}

static void sh_clk_write(int value, struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		iowrite8(value, clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		iowrite16(value, clk->mapped_reg);
	else
		iowrite32(value, clk->mapped_reg);
}

static int sh_clk_mstp_enable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
	return 0;
}

static void sh_clk_mstp_disable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
}

static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};

int __init sh_clk_mstp_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	int ret = 0;
	int k;

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;
		clkp->ops = &sh_clk_mstp_clk_ops;
		ret |= clk_register(clkp);
	}

	return ret;
}
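
/*
 * Usage sketch (hypothetical board support code, not from this file):
 * an MSTP clock is a simple gate with one module-stop bit per module,
 * cleared to run and set to stop.  The MSTPCR2 register name and the
 * peripheral_clk parent below are illustrative assumptions only.
 *
 *	static struct clk mstp_clks[] = {
 *		{
 *			.parent		= &peripheral_clk,
 *			.enable_reg	= (void __iomem *)MSTPCR2,
 *			.enable_bit	= 0,
 *		},
 *	};
 *
 *	sh_clk_mstp_register(mstp_clks, ARRAY_SIZE(mstp_clks));
 */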

/*
 * Div/mult table lookup helpers
 */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	return clk->priv;
}

static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
	return clk_to_div_table(clk)->div_mult_table;
}

/*
 * Common div ops
 */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}

static unsigned long sh_clk_div_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, clk->arch_flags ? &clk->arch_flags : NULL);

	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;

	return clk->freq_table[idx].frequency;
}

static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk_div_table *dt = clk_to_div_table(clk);
	unsigned long value;
	int idx;

	idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = sh_clk_read(clk);
	value &= ~(clk->div_mask << clk->enable_bit);
	value |= (idx << clk->enable_bit);
	sh_clk_write(value, clk);

	/* XXX: Should use a post-change notifier */
	if (dt->kick)
		dt->kick(clk);

	return 0;
}
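
/*
 * Worked example of the update above (illustrative values): a div6
 * clock has div_mask = SH_CLK_DIV6_MSK and enable_bit = 0, so picking
 * index 3 from the rate table clears the low six bits of the register
 * and writes back 3, selecting the divide-by-4 entry of the div6
 * divisor table further down in this file.
 */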

static int sh_clk_div_enable(struct clk *clk)
{
	if (clk->div_mask == SH_CLK_DIV6_MSK) {
		int ret = sh_clk_div_set_rate(clk, clk->rate);
		if (ret < 0)
			return ret;
	}

	sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
	return 0;
}

static void sh_clk_div_disable(struct clk *clk)
{
	unsigned int val;

	val = sh_clk_read(clk);
	val |= CPG_CKSTP_BIT;

	/*
	 * div6 clocks require the divisor field to be non-zero or the
	 * above CKSTP toggle silently fails. Ensure that the divisor
	 * array is reset to its initial state on disable.
	 */
	if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
		val |= clk->div_mask;

	sh_clk_write(val, clk);
}

static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};

static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};

static int __init sh_clk_init_parent(struct clk *clk)
{
	u32 val;

	if (clk->parent)
		return 0;

	if (!clk->parent_table || !clk->parent_num)
		return 0;

	if (!clk->src_width) {
		pr_err("sh_clk_init_parent: cannot select parent clock\n");
		return -EINVAL;
	}

	val  = (sh_clk_read(clk) >> clk->src_shift);
	val &= (1 << clk->src_width) - 1;

	if (val >= clk->parent_num) {
		pr_err("sh_clk_init_parent: parent selector out of range\n");
		return -EINVAL;
	}

	clk_reparent(clk, clk->parent_table[val]);
	if (!clk->parent) {
		pr_err("sh_clk_init_parent: unable to set parent\n");
		return -EINVAL;
	}

	return 0;
}

static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
		struct clk_div_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret == 0)
			ret = sh_clk_init_parent(clkp);
	}

	return ret;
}
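
/*
 * Layout of the shared freq_table allocation above: nr back-to-back
 * tables, each with nr_divs rate entries plus one CPUFREQ_TABLE_END
 * terminator slot per clock:
 *
 *	[clk0: rate0 .. rate(nr_divs-1), END][clk1: rate0 .., END] ...
 */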
236
Paul Mundt75f5f8a2012-05-25 15:26:01 +0900237/*
Paul Mundta60977a2012-05-25 14:59:26 +0900238 * div6 support
239 */
Magnus Dammfa676ca2010-05-11 13:29:34 +0000240static int sh_clk_div6_divisors[64] = {
241 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
242 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
243 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
244 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
245};
246
Paul Mundta60977a2012-05-25 14:59:26 +0900247static struct clk_div_mult_table div6_div_mult_table = {
Magnus Dammfa676ca2010-05-11 13:29:34 +0000248 .divisors = sh_clk_div6_divisors,
249 .nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
250};
251
Paul Mundta60977a2012-05-25 14:59:26 +0900252static struct clk_div_table sh_clk_div6_table = {
253 .div_mult_table = &div6_div_mult_table,
254};
255
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000256static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
257{
Paul Mundta60977a2012-05-25 14:59:26 +0900258 struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000259 u32 value;
260 int ret, i;
261
262 if (!clk->parent_table || !clk->parent_num)
263 return -EINVAL;
264
265 /* Search the parent */
266 for (i = 0; i < clk->parent_num; i++)
267 if (clk->parent_table[i] == parent)
268 break;
269
270 if (i == clk->parent_num)
271 return -ENODEV;
272
273 ret = clk_reparent(clk, parent);
274 if (ret < 0)
275 return ret;
276
Paul Mundt104fa612012-04-12 19:50:40 +0900277 value = sh_clk_read(clk) &
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000278 ~(((1 << clk->src_width) - 1) << clk->src_shift);
279
Paul Mundt104fa612012-04-12 19:50:40 +0900280 sh_clk_write(value | (i << clk->src_shift), clk);
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000281
282 /* Rebuild the frequency table */
283 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
Kuninori Morimoto52c10ad2011-04-14 17:13:53 +0900284 table, NULL);
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000285
286 return 0;
287}
288
Magnus Damma0ec3602012-02-29 22:16:21 +0900289static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
Paul Mundt75f5f8a2012-05-25 15:26:01 +0900290 .recalc = sh_clk_div_recalc,
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000291 .round_rate = sh_clk_div_round_rate,
Paul Mundt0fa22162012-05-25 15:52:10 +0900292 .set_rate = sh_clk_div_set_rate,
Paul Mundt764f4e42012-05-25 16:34:48 +0900293 .enable = sh_clk_div_enable,
294 .disable = sh_clk_div_disable,
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000295 .set_parent = sh_clk_div6_set_parent,
296};
297
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000298int __init sh_clk_div6_register(struct clk *clks, int nr)
299{
Paul Mundt609d7552012-05-25 16:55:05 +0900300 return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
301 &sh_clk_div_enable_clk_ops);
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000302}
303
304int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
305{
Paul Mundt609d7552012-05-25 16:55:05 +0900306 return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
307 &sh_clk_div6_reparent_clk_ops);
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000308}
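
/*
 * Usage sketch (hypothetical SoC code, not from this file): a div6
 * clock is a 6-bit divider with a CKSTP gate bit in the same register.
 * The FRQCRA register name and pll_clk parent below are illustrative
 * assumptions only.
 *
 *	static struct clk div6_clks[] = {
 *		{
 *			.parent		= &pll_clk,
 *			.enable_reg	= (void __iomem *)FRQCRA,
 *			.div_mask	= SH_CLK_DIV6_MSK,
 *			.flags		= CLK_MASK_DIV_ON_DISABLE,
 *		},
 *	};
 *
 *	sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
 */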

/*
 * div4 support
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	/*
	 * We really need a better way to determine the parent index;
	 * for now, assume an internal parent comes with CLK_ENABLE_ON_INIT
	 * set, and that no CLK_ENABLE_ON_INIT means an external clock.
	 */
	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}

static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div4_set_parent,
};

int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}

int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				       struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div_enable_clk_ops);
}

int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
					 struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div4_reparent_clk_ops);
}
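
/*
 * Usage sketch (hypothetical SoC code, not from this file): div4
 * clocks share one divisor/multiplier table; the names below are
 * illustrative assumptions only.
 *
 *	static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 18 };
 *
 *	static struct clk_div_mult_table div4_div_mult_table = {
 *		.divisors	= divisors,
 *		.nr_divisors	= ARRAY_SIZE(divisors),
 *	};
 *
 *	static struct clk_div4_table div4_table = {
 *		.div_mult_table	= &div4_div_mult_table,
 *	};
 *
 *	sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks), &div4_table);
 */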

/* FSI-DIV */
static unsigned long fsidiv_recalc(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base);

	value >>= 16;
	if (value < 2)
		return clk->parent->rate;

	return clk->parent->rate / value;
}

static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_div_range_round(clk, 1, 0xffff, rate);
}

static void fsidiv_disable(struct clk *clk)
{
	__raw_writel(0, clk->mapping->base);
}

static int fsidiv_enable(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base) >> 16;
	if (value < 2)
		return 0;

	__raw_writel((value << 16) | 0x3, clk->mapping->base);

	return 0;
}

static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
{
	int idx;

	idx = (clk->parent->rate / rate) & 0xffff;
	if (idx < 2)
		__raw_writel(0, clk->mapping->base);
	else
		__raw_writel(idx << 16, clk->mapping->base);

	return 0;
}

static struct sh_clk_ops fsidiv_clk_ops = {
	.recalc		= fsidiv_recalc,
	.round_rate	= fsidiv_round_rate,
	.set_rate	= fsidiv_set_rate,
	.enable		= fsidiv_enable,
	.disable	= fsidiv_disable,
};

int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
{
	struct clk_mapping *map;
	int i;

	for (i = 0; i < nr; i++) {

		map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
		if (!map) {
			pr_err("%s: unable to alloc memory\n", __func__);
			return -ENOMEM;
		}

		/* clks[i].enable_reg came from SH_CLK_FSIDIV() */
		map->phys	= (phys_addr_t)clks[i].enable_reg;
		map->len	= 8;

		clks[i].enable_reg	= 0; /* remove .enable_reg */
		clks[i].ops		= &fsidiv_clk_ops;
		clks[i].mapping		= map;

		clk_register(&clks[i]);
	}

	return 0;
}
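
/*
 * Usage sketch (hypothetical board code, not from this file): as the
 * comment in sh_clk_fsidiv_register() notes, .enable_reg initially
 * carries the physical FSIDIV register address (e.g. via the
 * SH_CLK_FSIDIV() initializer); .parent must also be set, since the
 * ops above derive the rate from clk->parent->rate.  FSIDIVA and
 * fsia_clk are illustrative names.
 *
 *	static struct clk fsidivs[] = {
 *		{
 *			.enable_reg	= (void __iomem *)FSIDIVA,
 *			.parent		= &fsia_clk,
 *		},
 *	};
 *
 *	sh_clk_fsidiv_register(fsidivs, ARRAY_SIZE(fsidivs));
 */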