/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 * Copyright (C) 2010 Magnus Damm
 * Copyright (C) 2010 - 2012 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>

#define CPG_CKSTP_BIT	BIT(8)

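/*
 * Register access helpers: CPG control registers come in 8-, 16-, and
 * 32-bit variants depending on the block, so a clock's
 * CLK_ENABLE_REG_*BIT flags select the access width, with 32-bit as
 * the default. Everything below goes through these two helpers so the
 * rest of the file stays width-agnostic.
 */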
static unsigned int sh_clk_read(struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		return ioread8(clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		return ioread16(clk->mapped_reg);

	return ioread32(clk->mapped_reg);
}

static void sh_clk_write(int value, struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		iowrite8(value, clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		iowrite16(value, clk->mapped_reg);
	else
		iowrite32(value, clk->mapped_reg);
}

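/*
 * MSTP (module stop) bits are active-low gates: a set bit holds the
 * module's clock stopped, a cleared bit lets it run. Enable therefore
 * clears clk->enable_bit and disable sets it.
 */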
static int sh_clk_mstp_enable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
	return 0;
}

static void sh_clk_mstp_disable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
}

static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};

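/*
 * Sketch of typical use (identifiers illustrative): a CPU's clock code
 * describes its gates in an array, usually via the SH_CLK_MSTP*()
 * initializers from <linux/sh_clk.h>, and registers them in one call:
 *
 *	static struct clk mstp_clks[] = {
 *		SH_CLK_MSTP32(&div4_clks[0], MSTPCR0, 0, 0),
 *	};
 *
 *	sh_clk_mstp_register(mstp_clks, ARRAY_SIZE(mstp_clks));
 */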
int __init sh_clk_mstp_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	int ret = 0;
	int k;

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;
		clkp->ops = &sh_clk_mstp_clk_ops;
		ret |= clk_register(clkp);
	}

	return ret;
}

/*
 * Div/mult table lookup helpers
 */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	return clk->priv;
}

static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
	return clk_to_div_table(clk)->div_mult_table;
}

/*
 * Common div ops
 */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}

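/*
 * Note that for div4/div6 clocks, clk->enable_bit doubles as the bit
 * position of the divisor field: recalc recovers the current rate by
 * extracting that field and indexing the rebuilt rate table, and
 * set_rate writes the matching table index back into the field.
 */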
static unsigned long sh_clk_div_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, clk->arch_flags ? &clk->arch_flags : NULL);

	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;

	return clk->freq_table[idx].frequency;
}

static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk_div_table *dt = clk_to_div_table(clk);
	unsigned long value;
	int idx;

	idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = sh_clk_read(clk);
	value &= ~(clk->div_mask << clk->enable_bit);
	value |= (idx << clk->enable_bit);
	sh_clk_write(value, clk);

	/* XXX: Should use a post-change notifier */
	if (dt->kick)
		dt->kick(clk);

	return 0;
}

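/*
 * Clocks with a clock stop bit share the CKSTP toggle below; for div6
 * clocks this is bit 8 of the same register that holds the divisor
 * field.
 */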
static int sh_clk_div_enable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
	return 0;
}

static void sh_clk_div_disable(struct clk *clk)
{
	unsigned int val;

	val = sh_clk_read(clk);
	val |= CPG_CKSTP_BIT;

	/*
	 * div6 clocks require the divisor field to be non-zero or the
	 * above CKSTP toggle silently fails. Ensure that the divisor
	 * array is reset to its initial state on disable.
	 */
	if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
		val |= clk->div_mask;

	sh_clk_write(val, clk);
}

static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};

static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};

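/*
 * For clocks with a parent mux, recover the parent currently selected
 * by the src_shift/src_width register field at registration time, so
 * the clock tree matches whatever state the boot loader left behind.
 */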
static int __init sh_clk_init_parent(struct clk *clk)
{
	u32 val;

	if (clk->parent)
		return 0;

	if (!clk->parent_table || !clk->parent_num)
		return 0;

	if (!clk->src_width) {
		pr_err("sh_clk_init_parent: cannot select parent clock\n");
		return -EINVAL;
	}

	val  = (sh_clk_read(clk) >> clk->src_shift);
	val &= (1 << clk->src_width) - 1;

	if (val >= clk->parent_num) {
		pr_err("sh_clk_init_parent: parent index out of range\n");
		return -EINVAL;
	}

	clk_reparent(clk, clk->parent_table[val]);
	if (!clk->parent) {
		pr_err("sh_clk_init_parent: unable to set parent\n");
		return -EINVAL;
	}

	return 0;
}

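/*
 * Shared registration path for all div-style clocks: one
 * cpufreq_frequency_table allocation covers the whole clock array and
 * is sliced per clock, each slice terminated with CPUFREQ_TABLE_END.
 */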
static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
			struct clk_div_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret == 0)
			ret = sh_clk_init_parent(clkp);
	}

	return ret;
}

/*
 * div6 support
 */
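/* div6 divide ratios are linear: field value n selects divide-by-(n + 1). */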
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table = &div6_div_mult_table,
};

static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* Search the parent */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	value = sh_clk_read(clk) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	sh_clk_write(value | (i << clk->src_shift), clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	return 0;
}

static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div6_set_parent,
};

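/*
 * Sketch of typical use (identifiers illustrative): div6 clocks are
 * normally described with the SH_CLK_DIV6() initializer from
 * <linux/sh_clk.h> against their divisor register:
 *
 *	static struct clk div6_clks[] = {
 *		SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
 *	};
 *
 *	sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
 */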
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div_enable_clk_ops);
}

int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div6_reparent_clk_ops);
}

/*
 * div4 support
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}

static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div4_set_parent,
};

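/*
 * Sketch of typical use (identifiers and field values illustrative):
 * div4 clocks hang off a common FRQCR-style register and are described
 * with SH_CLK_DIV4() plus a shared clk_div4_table:
 *
 *	static struct clk div4_clks[] = {
 *		SH_CLK_DIV4(&pll_clk, FRQCR, 20, 0x2f7c, CLK_ENABLE_ON_INIT),
 *	};
 *
 *	sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks), &div4_table);
 */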
int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}

int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				       struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div_enable_clk_ops);
}

int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
					 struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div4_reparent_clk_ops);
}

/* FSI-DIV */
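/*
 * The FSI-DIV register keeps its divide ratio in the upper 16 bits;
 * values below 2 pass the parent rate straight through. The low bits
 * act as enable bits (0x3 is written back on enable).
 */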
static unsigned long fsidiv_recalc(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base);

	value >>= 16;
	if (value < 2)
		return clk->parent->rate;

	return clk->parent->rate / value;
}

static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_div_range_round(clk, 1, 0xffff, rate);
}

static void fsidiv_disable(struct clk *clk)
{
	__raw_writel(0, clk->mapping->base);
}

static int fsidiv_enable(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base) >> 16;
	if (value < 2)
		return 0;

	__raw_writel((value << 16) | 0x3, clk->mapping->base);

	return 0;
}

static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
{
	int idx;

	idx = (clk->parent->rate / rate) & 0xffff;
	if (idx < 2)
		__raw_writel(0, clk->mapping->base);
	else
		__raw_writel(idx << 16, clk->mapping->base);

	return 0;
}

static struct sh_clk_ops fsidiv_clk_ops = {
	.recalc		= fsidiv_recalc,
	.round_rate	= fsidiv_round_rate,
	.set_rate	= fsidiv_set_rate,
	.enable		= fsidiv_enable,
	.disable	= fsidiv_disable,
};

int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
{
	struct clk_mapping *map;
	int i;

	for (i = 0; i < nr; i++) {

		map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
		if (!map) {
			pr_err("%s: unable to alloc memory\n", __func__);
			return -ENOMEM;
		}

		/* clks[i].enable_reg came from SH_CLK_FSIDIV() */
		map->phys = (phys_addr_t)clks[i].enable_reg;
		map->len = 8;

		clks[i].enable_reg = 0; /* remove .enable_reg */
		clks[i].ops = &fsidiv_clk_ops;
		clks[i].mapping = map;

		clk_register(&clks[i]);
	}

	return 0;
}