blob: eeaec796a395a083dc1f32df3d01a699fc366165 [file] [log] [blame]
Paul Mundtde9186c2010-10-18 21:32:58 +09001/*
2 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
3 *
4 * Copyright (C) 2010 Magnus Damm
Paul Mundt4d6ddb02012-04-11 12:05:50 +09005 * Copyright (C) 2010 - 2012 Paul Mundt
Paul Mundtde9186c2010-10-18 21:32:58 +09006 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
Magnus Dammfa676ca2010-05-11 13:29:34 +000011#include <linux/clk.h>
12#include <linux/compiler.h>
13#include <linux/slab.h>
14#include <linux/io.h>
15#include <linux/sh_clk.h>
16
Paul Mundt764f4e42012-05-25 16:34:48 +090017#define CPG_CKSTP_BIT BIT(8)
18
Paul Mundt104fa612012-04-12 19:50:40 +090019static unsigned int sh_clk_read(struct clk *clk)
Magnus Dammfa676ca2010-05-11 13:29:34 +000020{
Paul Mundt4d6ddb02012-04-11 12:05:50 +090021 if (clk->flags & CLK_ENABLE_REG_8BIT)
Paul Mundt104fa612012-04-12 19:50:40 +090022 return ioread8(clk->mapped_reg);
Paul Mundt4d6ddb02012-04-11 12:05:50 +090023 else if (clk->flags & CLK_ENABLE_REG_16BIT)
Paul Mundt104fa612012-04-12 19:50:40 +090024 return ioread16(clk->mapped_reg);
Paul Mundt4d6ddb02012-04-11 12:05:50 +090025
Paul Mundt104fa612012-04-12 19:50:40 +090026 return ioread32(clk->mapped_reg);
27}
28
29static void sh_clk_write(int value, struct clk *clk)
30{
31 if (clk->flags & CLK_ENABLE_REG_8BIT)
32 iowrite8(value, clk->mapped_reg);
33 else if (clk->flags & CLK_ENABLE_REG_16BIT)
34 iowrite16(value, clk->mapped_reg);
35 else
36 iowrite32(value, clk->mapped_reg);
37}
38
39static int sh_clk_mstp_enable(struct clk *clk)
40{
41 sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +000042 return 0;
43}
44
Paul Mundt4d6ddb02012-04-11 12:05:50 +090045static void sh_clk_mstp_disable(struct clk *clk)
Magnus Dammfa676ca2010-05-11 13:29:34 +000046{
Paul Mundt104fa612012-04-12 19:50:40 +090047 sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +000048}
49
/* Ops for MSTP gate clocks: rate simply follows the parent. */
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};
55
Paul Mundt4d6ddb02012-04-11 12:05:50 +090056int __init sh_clk_mstp_register(struct clk *clks, int nr)
Magnus Dammfa676ca2010-05-11 13:29:34 +000057{
58 struct clk *clkp;
59 int ret = 0;
60 int k;
61
62 for (k = 0; !ret && (k < nr); k++) {
63 clkp = clks + k;
Paul Mundt4d6ddb02012-04-11 12:05:50 +090064 clkp->ops = &sh_clk_mstp_clk_ops;
Magnus Dammfa676ca2010-05-11 13:29:34 +000065 ret |= clk_register(clkp);
66 }
67
68 return ret;
69}
70
Paul Mundta60977a2012-05-25 14:59:26 +090071/*
72 * Div/mult table lookup helpers
73 */
74static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
75{
76 return clk->priv;
77}
78
79static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
80{
81 return clk_to_div_table(clk)->div_mult_table;
82}
83
/*
 * Common div ops
 */
/* Round a requested rate via the clock's precomputed frequency table. */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
91
92static unsigned long sh_clk_div_recalc(struct clk *clk)
93{
94 struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
95 unsigned int idx;
96
97 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
98 table, clk->arch_flags ? &clk->arch_flags : NULL);
99
100 idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;
101
102 return clk->freq_table[idx].frequency;
103}
104
Paul Mundt0fa22162012-05-25 15:52:10 +0900105static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
106{
107 struct clk_div_table *dt = clk_to_div_table(clk);
108 unsigned long value;
109 int idx;
110
111 idx = clk_rate_table_find(clk, clk->freq_table, rate);
112 if (idx < 0)
113 return idx;
114
115 value = sh_clk_read(clk);
116 value &= ~(clk->div_mask << clk->enable_bit);
117 value |= (idx << clk->enable_bit);
118 sh_clk_write(value, clk);
119
120 /* XXX: Should use a post-change notifier */
121 if (dt->kick)
122 dt->kick(clk);
123
124 return 0;
125}
126
Paul Mundt764f4e42012-05-25 16:34:48 +0900127static int sh_clk_div_enable(struct clk *clk)
128{
129 sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
130 return 0;
131}
132
133static void sh_clk_div_disable(struct clk *clk)
134{
135 unsigned int val;
136
137 val = sh_clk_read(clk);
138 val |= CPG_CKSTP_BIT;
139
140 /*
141 * div6 clocks require the divisor field to be non-zero or the
142 * above CKSTP toggle silently fails. Ensure that the divisor
143 * array is reset to its initial state on disable.
144 */
145 if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
146 val |= clk->div_mask;
147
148 sh_clk_write(val, clk);
149}
150
/* div ops for clocks that are always running (no enable/disable). */
static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};
156
/* div ops for clocks that are additionally gated via the CKSTP bit. */
static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};
164
/*
 * div6 support
 */
/* div6 divisor field value N selects a divide ratio of N + 1 (1..64). */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
174
/* Divisor-only table for div6 blocks (.multipliers intentionally unset). */
static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
179
/* Shared divider table installed as clk->priv for every div6 clock. */
static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table = &div6_div_mult_table,
};
183
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000184static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
185{
Paul Mundta60977a2012-05-25 14:59:26 +0900186 struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000187 u32 value;
188 int ret, i;
189
190 if (!clk->parent_table || !clk->parent_num)
191 return -EINVAL;
192
193 /* Search the parent */
194 for (i = 0; i < clk->parent_num; i++)
195 if (clk->parent_table[i] == parent)
196 break;
197
198 if (i == clk->parent_num)
199 return -ENODEV;
200
201 ret = clk_reparent(clk, parent);
202 if (ret < 0)
203 return ret;
204
Paul Mundt104fa612012-04-12 19:50:40 +0900205 value = sh_clk_read(clk) &
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000206 ~(((1 << clk->src_width) - 1) << clk->src_shift);
207
Paul Mundt104fa612012-04-12 19:50:40 +0900208 sh_clk_write(value | (i << clk->src_shift), clk);
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000209
210 /* Rebuild the frequency table */
211 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
Kuninori Morimoto52c10ad2011-04-14 17:13:53 +0900212 table, NULL);
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000213
214 return 0;
215}
216
/* div6 ops for clocks whose parent input is runtime-selectable. */
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
225
Kuninori Morimoto56242a12011-11-21 21:33:18 -0800226static int __init sh_clk_init_parent(struct clk *clk)
227{
228 u32 val;
229
230 if (clk->parent)
231 return 0;
232
233 if (!clk->parent_table || !clk->parent_num)
234 return 0;
235
236 if (!clk->src_width) {
237 pr_err("sh_clk_init_parent: cannot select parent clock\n");
238 return -EINVAL;
239 }
240
Paul Mundt104fa612012-04-12 19:50:40 +0900241 val = (sh_clk_read(clk) >> clk->src_shift);
Kuninori Morimoto56242a12011-11-21 21:33:18 -0800242 val &= (1 << clk->src_width) - 1;
243
244 if (val >= clk->parent_num) {
245 pr_err("sh_clk_init_parent: parent table size failed\n");
246 return -EINVAL;
247 }
248
Kuninori Morimoto64dea572012-01-19 01:00:40 -0800249 clk_reparent(clk, clk->parent_table[val]);
Kuninori Morimoto56242a12011-11-21 21:33:18 -0800250 if (!clk->parent) {
251 pr_err("sh_clk_init_parent: unable to set parent");
252 return -EINVAL;
253 }
254
255 return 0;
256}
257
/*
 * Register an array of div6 clocks with the given ops.
 *
 * One zeroed cpufreq frequency-table backing store is allocated for all
 * @nr clocks and parceled out per clock, each slice terminated with
 * CPUFREQ_TABLE_END.  The loop stops at the first registration or
 * parent-init failure and returns that error; clocks registered before
 * the failure stay registered, and the backing store is not freed
 * (already-registered clocks keep pointers into it).
 */
static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
					   struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	struct clk_div_table *table = &sh_clk_div6_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	/* One extra entry per clock for the CPUFREQ_TABLE_END sentinel. */
	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div6_register: unable to alloc memory\n");
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;
		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
		ret = clk_register(clkp);
		if (ret < 0)
			break;

		ret = sh_clk_init_parent(clkp);
	}

	return ret;
}
292
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000293int __init sh_clk_div6_register(struct clk *clks, int nr)
294{
Paul Mundte3c87602012-05-25 16:43:42 +0900295 return sh_clk_div6_register_ops(clks, nr, &sh_clk_div_enable_clk_ops);
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000296}
297
298int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
299{
300 return sh_clk_div6_register_ops(clks, nr,
301 &sh_clk_div6_reparent_clk_ops);
302}
303
Paul Mundta60977a2012-05-25 14:59:26 +0900304/*
305 * div4 support
306 */
Magnus Dammfa676ca2010-05-11 13:29:34 +0000307static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
308{
Paul Mundta60977a2012-05-25 14:59:26 +0900309 struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000310 u32 value;
311 int ret;
312
313 /* we really need a better way to determine parent index, but for
314 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
315 * no CLK_ENABLE_ON_INIT means external clock...
316 */
317
318 if (parent->flags & CLK_ENABLE_ON_INIT)
Paul Mundt104fa612012-04-12 19:50:40 +0900319 value = sh_clk_read(clk) & ~(1 << 7);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000320 else
Paul Mundt104fa612012-04-12 19:50:40 +0900321 value = sh_clk_read(clk) | (1 << 7);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000322
323 ret = clk_reparent(clk, parent);
324 if (ret < 0)
325 return ret;
326
Paul Mundt104fa612012-04-12 19:50:40 +0900327 sh_clk_write(value, clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000328
329 /* Rebiuld the frequency table */
330 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
331 table, &clk->arch_flags);
332
333 return 0;
334}
335
/* div4 ops for clocks whose parent input is runtime-selectable. */
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
344
345static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
Magnus Damma0ec3602012-02-29 22:16:21 +0900346 struct clk_div4_table *table, struct sh_clk_ops *ops)
Magnus Dammfa676ca2010-05-11 13:29:34 +0000347{
348 struct clk *clkp;
349 void *freq_table;
350 int nr_divs = table->div_mult_table->nr_divisors;
351 int freq_table_size = sizeof(struct cpufreq_frequency_table);
352 int ret = 0;
353 int k;
354
355 freq_table_size *= (nr_divs + 1);
356 freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
357 if (!freq_table) {
358 pr_err("sh_clk_div4_register: unable to alloc memory\n");
359 return -ENOMEM;
360 }
361
362 for (k = 0; !ret && (k < nr); k++) {
363 clkp = clks + k;
364
365 clkp->ops = ops;
Magnus Dammfa676ca2010-05-11 13:29:34 +0000366 clkp->priv = table;
367
368 clkp->freq_table = freq_table + (k * freq_table_size);
369 clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
370
371 ret = clk_register(clkp);
372 }
373
374 return ret;
375}
376
377int __init sh_clk_div4_register(struct clk *clks, int nr,
378 struct clk_div4_table *table)
379{
Paul Mundte3c87602012-05-25 16:43:42 +0900380 return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000381}
382
383int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
384 struct clk_div4_table *table)
385{
386 return sh_clk_div4_register_ops(clks, nr, table,
Paul Mundte3c87602012-05-25 16:43:42 +0900387 &sh_clk_div_enable_clk_ops);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000388}
389
390int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
391 struct clk_div4_table *table)
392{
393 return sh_clk_div4_register_ops(clks, nr, table,
394 &sh_clk_div4_reparent_clk_ops);
395}