blob: 29ee5f7072a49b8a18cb4a3fb9ec4208bf8c6d92 [file] [log] [blame]
/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 * Copyright (C) 2010 Magnus Damm
 * Copyright (C) 2010 - 2012 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
Magnus Dammfa676ca2010-05-11 13:29:34 +000011#include <linux/clk.h>
12#include <linux/compiler.h>
13#include <linux/slab.h>
14#include <linux/io.h>
15#include <linux/sh_clk.h>
16
Paul Mundt104fa612012-04-12 19:50:40 +090017static unsigned int sh_clk_read(struct clk *clk)
Magnus Dammfa676ca2010-05-11 13:29:34 +000018{
Paul Mundt4d6ddb02012-04-11 12:05:50 +090019 if (clk->flags & CLK_ENABLE_REG_8BIT)
Paul Mundt104fa612012-04-12 19:50:40 +090020 return ioread8(clk->mapped_reg);
Paul Mundt4d6ddb02012-04-11 12:05:50 +090021 else if (clk->flags & CLK_ENABLE_REG_16BIT)
Paul Mundt104fa612012-04-12 19:50:40 +090022 return ioread16(clk->mapped_reg);
Paul Mundt4d6ddb02012-04-11 12:05:50 +090023
Paul Mundt104fa612012-04-12 19:50:40 +090024 return ioread32(clk->mapped_reg);
25}
26
27static void sh_clk_write(int value, struct clk *clk)
28{
29 if (clk->flags & CLK_ENABLE_REG_8BIT)
30 iowrite8(value, clk->mapped_reg);
31 else if (clk->flags & CLK_ENABLE_REG_16BIT)
32 iowrite16(value, clk->mapped_reg);
33 else
34 iowrite32(value, clk->mapped_reg);
35}
36
37static int sh_clk_mstp_enable(struct clk *clk)
38{
39 sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +000040 return 0;
41}
42
Paul Mundt4d6ddb02012-04-11 12:05:50 +090043static void sh_clk_mstp_disable(struct clk *clk)
Magnus Dammfa676ca2010-05-11 13:29:34 +000044{
Paul Mundt104fa612012-04-12 19:50:40 +090045 sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +000046}
47
Paul Mundt4d6ddb02012-04-11 12:05:50 +090048static struct sh_clk_ops sh_clk_mstp_clk_ops = {
49 .enable = sh_clk_mstp_enable,
50 .disable = sh_clk_mstp_disable,
Magnus Dammfa676ca2010-05-11 13:29:34 +000051 .recalc = followparent_recalc,
52};
53
Paul Mundt4d6ddb02012-04-11 12:05:50 +090054int __init sh_clk_mstp_register(struct clk *clks, int nr)
Magnus Dammfa676ca2010-05-11 13:29:34 +000055{
56 struct clk *clkp;
57 int ret = 0;
58 int k;
59
60 for (k = 0; !ret && (k < nr); k++) {
61 clkp = clks + k;
Paul Mundt4d6ddb02012-04-11 12:05:50 +090062 clkp->ops = &sh_clk_mstp_clk_ops;
Magnus Dammfa676ca2010-05-11 13:29:34 +000063 ret |= clk_register(clkp);
64 }
65
66 return ret;
67}
68
Paul Mundta60977a2012-05-25 14:59:26 +090069/*
70 * Div/mult table lookup helpers
71 */
72static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
73{
74 return clk->priv;
75}
76
77static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
78{
79 return clk_to_div_table(clk)->div_mult_table;
80}
81
82/*
Paul Mundt75f5f8a2012-05-25 15:26:01 +090083 * Common div ops
84 */
85static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
86{
87 return clk_rate_table_round(clk, clk->freq_table, rate);
88}
89
90static unsigned long sh_clk_div_recalc(struct clk *clk)
91{
92 struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
93 unsigned int idx;
94
95 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
96 table, clk->arch_flags ? &clk->arch_flags : NULL);
97
98 idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;
99
100 return clk->freq_table[idx].frequency;
101}
102
Paul Mundt0fa22162012-05-25 15:52:10 +0900103static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
104{
105 struct clk_div_table *dt = clk_to_div_table(clk);
106 unsigned long value;
107 int idx;
108
109 idx = clk_rate_table_find(clk, clk->freq_table, rate);
110 if (idx < 0)
111 return idx;
112
113 value = sh_clk_read(clk);
114 value &= ~(clk->div_mask << clk->enable_bit);
115 value |= (idx << clk->enable_bit);
116 sh_clk_write(value, clk);
117
118 /* XXX: Should use a post-change notifier */
119 if (dt->kick)
120 dt->kick(clk);
121
122 return 0;
123}
124
/*
 * div6 support
 */
/* DIV6 clocks support every integer divisor from 1 through 64. */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
134
Paul Mundta60977a2012-05-25 14:59:26 +0900135static struct clk_div_mult_table div6_div_mult_table = {
Magnus Dammfa676ca2010-05-11 13:29:34 +0000136 .divisors = sh_clk_div6_divisors,
137 .nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
138};
139
Paul Mundta60977a2012-05-25 14:59:26 +0900140static struct clk_div_table sh_clk_div6_table = {
141 .div_mult_table = &div6_div_mult_table,
142};
143
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000144static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
145{
Paul Mundta60977a2012-05-25 14:59:26 +0900146 struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000147 u32 value;
148 int ret, i;
149
150 if (!clk->parent_table || !clk->parent_num)
151 return -EINVAL;
152
153 /* Search the parent */
154 for (i = 0; i < clk->parent_num; i++)
155 if (clk->parent_table[i] == parent)
156 break;
157
158 if (i == clk->parent_num)
159 return -ENODEV;
160
161 ret = clk_reparent(clk, parent);
162 if (ret < 0)
163 return ret;
164
Paul Mundt104fa612012-04-12 19:50:40 +0900165 value = sh_clk_read(clk) &
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000166 ~(((1 << clk->src_width) - 1) << clk->src_shift);
167
Paul Mundt104fa612012-04-12 19:50:40 +0900168 sh_clk_write(value | (i << clk->src_shift), clk);
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000169
170 /* Rebuild the frequency table */
171 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
Kuninori Morimoto52c10ad2011-04-14 17:13:53 +0900172 table, NULL);
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000173
174 return 0;
175}
176
Magnus Dammfa676ca2010-05-11 13:29:34 +0000177static int sh_clk_div6_enable(struct clk *clk)
178{
179 unsigned long value;
180 int ret;
181
Paul Mundt0fa22162012-05-25 15:52:10 +0900182 ret = sh_clk_div_set_rate(clk, clk->rate);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000183 if (ret == 0) {
Paul Mundt104fa612012-04-12 19:50:40 +0900184 value = sh_clk_read(clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000185 value &= ~0x100; /* clear stop bit to enable clock */
Paul Mundt104fa612012-04-12 19:50:40 +0900186 sh_clk_write(value, clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000187 }
188 return ret;
189}
190
191static void sh_clk_div6_disable(struct clk *clk)
192{
193 unsigned long value;
194
Paul Mundt104fa612012-04-12 19:50:40 +0900195 value = sh_clk_read(clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000196 value |= 0x100; /* stop clock */
Paul Mundt1111cc12012-05-25 15:21:43 +0900197 value |= clk->div_mask; /* VDIV bits must be non-zero, overwrite divider */
Paul Mundt104fa612012-04-12 19:50:40 +0900198 sh_clk_write(value, clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000199}
200
Magnus Damma0ec3602012-02-29 22:16:21 +0900201static struct sh_clk_ops sh_clk_div6_clk_ops = {
Paul Mundt75f5f8a2012-05-25 15:26:01 +0900202 .recalc = sh_clk_div_recalc,
Magnus Dammfa676ca2010-05-11 13:29:34 +0000203 .round_rate = sh_clk_div_round_rate,
Paul Mundt0fa22162012-05-25 15:52:10 +0900204 .set_rate = sh_clk_div_set_rate,
Magnus Dammfa676ca2010-05-11 13:29:34 +0000205 .enable = sh_clk_div6_enable,
206 .disable = sh_clk_div6_disable,
207};
208
Magnus Damma0ec3602012-02-29 22:16:21 +0900209static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
Paul Mundt75f5f8a2012-05-25 15:26:01 +0900210 .recalc = sh_clk_div_recalc,
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000211 .round_rate = sh_clk_div_round_rate,
Paul Mundt0fa22162012-05-25 15:52:10 +0900212 .set_rate = sh_clk_div_set_rate,
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000213 .enable = sh_clk_div6_enable,
214 .disable = sh_clk_div6_disable,
215 .set_parent = sh_clk_div6_set_parent,
216};
217
Kuninori Morimoto56242a12011-11-21 21:33:18 -0800218static int __init sh_clk_init_parent(struct clk *clk)
219{
220 u32 val;
221
222 if (clk->parent)
223 return 0;
224
225 if (!clk->parent_table || !clk->parent_num)
226 return 0;
227
228 if (!clk->src_width) {
229 pr_err("sh_clk_init_parent: cannot select parent clock\n");
230 return -EINVAL;
231 }
232
Paul Mundt104fa612012-04-12 19:50:40 +0900233 val = (sh_clk_read(clk) >> clk->src_shift);
Kuninori Morimoto56242a12011-11-21 21:33:18 -0800234 val &= (1 << clk->src_width) - 1;
235
236 if (val >= clk->parent_num) {
237 pr_err("sh_clk_init_parent: parent table size failed\n");
238 return -EINVAL;
239 }
240
Kuninori Morimoto64dea572012-01-19 01:00:40 -0800241 clk_reparent(clk, clk->parent_table[val]);
Kuninori Morimoto56242a12011-11-21 21:33:18 -0800242 if (!clk->parent) {
243 pr_err("sh_clk_init_parent: unable to set parent");
244 return -EINVAL;
245 }
246
247 return 0;
248}
249
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000250static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
Magnus Damma0ec3602012-02-29 22:16:21 +0900251 struct sh_clk_ops *ops)
Magnus Dammfa676ca2010-05-11 13:29:34 +0000252{
253 struct clk *clkp;
254 void *freq_table;
Paul Mundta60977a2012-05-25 14:59:26 +0900255 struct clk_div_table *table = &sh_clk_div6_table;
256 int nr_divs = table->div_mult_table->nr_divisors;
Magnus Dammfa676ca2010-05-11 13:29:34 +0000257 int freq_table_size = sizeof(struct cpufreq_frequency_table);
258 int ret = 0;
259 int k;
260
261 freq_table_size *= (nr_divs + 1);
262 freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
263 if (!freq_table) {
264 pr_err("sh_clk_div6_register: unable to alloc memory\n");
265 return -ENOMEM;
266 }
267
268 for (k = 0; !ret && (k < nr); k++) {
269 clkp = clks + k;
270
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000271 clkp->ops = ops;
Paul Mundta60977a2012-05-25 14:59:26 +0900272 clkp->priv = table;
Magnus Dammfa676ca2010-05-11 13:29:34 +0000273 clkp->freq_table = freq_table + (k * freq_table_size);
274 clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
Kuninori Morimoto7784f4d2011-12-11 19:02:09 -0800275 ret = clk_register(clkp);
Kuninori Morimoto56242a12011-11-21 21:33:18 -0800276 if (ret < 0)
277 break;
Magnus Dammfa676ca2010-05-11 13:29:34 +0000278
Kuninori Morimoto7784f4d2011-12-11 19:02:09 -0800279 ret = sh_clk_init_parent(clkp);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000280 }
281
282 return ret;
283}
284
Guennadi Liakhovetskib3dd51a2010-07-21 10:13:10 +0000285int __init sh_clk_div6_register(struct clk *clks, int nr)
286{
287 return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
288}
289
290int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
291{
292 return sh_clk_div6_register_ops(clks, nr,
293 &sh_clk_div6_reparent_clk_ops);
294}
295
Paul Mundta60977a2012-05-25 14:59:26 +0900296/*
297 * div4 support
298 */
Magnus Dammfa676ca2010-05-11 13:29:34 +0000299static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
300{
Paul Mundta60977a2012-05-25 14:59:26 +0900301 struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000302 u32 value;
303 int ret;
304
305 /* we really need a better way to determine parent index, but for
306 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
307 * no CLK_ENABLE_ON_INIT means external clock...
308 */
309
310 if (parent->flags & CLK_ENABLE_ON_INIT)
Paul Mundt104fa612012-04-12 19:50:40 +0900311 value = sh_clk_read(clk) & ~(1 << 7);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000312 else
Paul Mundt104fa612012-04-12 19:50:40 +0900313 value = sh_clk_read(clk) | (1 << 7);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000314
315 ret = clk_reparent(clk, parent);
316 if (ret < 0)
317 return ret;
318
Paul Mundt104fa612012-04-12 19:50:40 +0900319 sh_clk_write(value, clk);
Magnus Dammfa676ca2010-05-11 13:29:34 +0000320
321 /* Rebiuld the frequency table */
322 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
323 table, &clk->arch_flags);
324
325 return 0;
326}
327
/* Enable a DIV4 clock by clearing its stop bit (bit 8). */
static int sh_clk_div4_enable(struct clk *clk)
{
	unsigned int value = sh_clk_read(clk);

	sh_clk_write(value & ~(1 << 8), clk);
	return 0;
}
333
/* Stop a DIV4 clock by setting its stop bit (bit 8). */
static void sh_clk_div4_disable(struct clk *clk)
{
	unsigned int value = sh_clk_read(clk);

	sh_clk_write(value | (1 << 8), clk);
}
338
Magnus Damma0ec3602012-02-29 22:16:21 +0900339static struct sh_clk_ops sh_clk_div4_clk_ops = {
Paul Mundt75f5f8a2012-05-25 15:26:01 +0900340 .recalc = sh_clk_div_recalc,
Paul Mundt0fa22162012-05-25 15:52:10 +0900341 .set_rate = sh_clk_div_set_rate,
Magnus Dammfa676ca2010-05-11 13:29:34 +0000342 .round_rate = sh_clk_div_round_rate,
343};
344
Magnus Damma0ec3602012-02-29 22:16:21 +0900345static struct sh_clk_ops sh_clk_div4_enable_clk_ops = {
Paul Mundt75f5f8a2012-05-25 15:26:01 +0900346 .recalc = sh_clk_div_recalc,
Paul Mundt0fa22162012-05-25 15:52:10 +0900347 .set_rate = sh_clk_div_set_rate,
Magnus Dammfa676ca2010-05-11 13:29:34 +0000348 .round_rate = sh_clk_div_round_rate,
349 .enable = sh_clk_div4_enable,
350 .disable = sh_clk_div4_disable,
351};
352
Magnus Damma0ec3602012-02-29 22:16:21 +0900353static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
Paul Mundt75f5f8a2012-05-25 15:26:01 +0900354 .recalc = sh_clk_div_recalc,
Paul Mundt0fa22162012-05-25 15:52:10 +0900355 .set_rate = sh_clk_div_set_rate,
Magnus Dammfa676ca2010-05-11 13:29:34 +0000356 .round_rate = sh_clk_div_round_rate,
357 .enable = sh_clk_div4_enable,
358 .disable = sh_clk_div4_disable,
359 .set_parent = sh_clk_div4_set_parent,
360};
361
362static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
Magnus Damma0ec3602012-02-29 22:16:21 +0900363 struct clk_div4_table *table, struct sh_clk_ops *ops)
Magnus Dammfa676ca2010-05-11 13:29:34 +0000364{
365 struct clk *clkp;
366 void *freq_table;
367 int nr_divs = table->div_mult_table->nr_divisors;
368 int freq_table_size = sizeof(struct cpufreq_frequency_table);
369 int ret = 0;
370 int k;
371
372 freq_table_size *= (nr_divs + 1);
373 freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
374 if (!freq_table) {
375 pr_err("sh_clk_div4_register: unable to alloc memory\n");
376 return -ENOMEM;
377 }
378
379 for (k = 0; !ret && (k < nr); k++) {
380 clkp = clks + k;
381
382 clkp->ops = ops;
Magnus Dammfa676ca2010-05-11 13:29:34 +0000383 clkp->priv = table;
384
385 clkp->freq_table = freq_table + (k * freq_table_size);
386 clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
387
388 ret = clk_register(clkp);
389 }
390
391 return ret;
392}
393
394int __init sh_clk_div4_register(struct clk *clks, int nr,
395 struct clk_div4_table *table)
396{
397 return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
398}
399
400int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
401 struct clk_div4_table *table)
402{
403 return sh_clk_div4_register_ops(clks, nr, table,
404 &sh_clk_div4_enable_clk_ops);
405}
406
407int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
408 struct clk_div4_table *table)
409{
410 return sh_clk_div4_register_ops(clks, nr, table,
411 &sh_clk_div4_reparent_clk_ops);
412}