/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 * Copyright (C) 2010 Magnus Damm
 * Copyright (C) 2010 - 2012 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>

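/*
 * MSTP gates: each module clock is controlled by a single "module stop"
 * bit in its enable register (typically an MSTPCR register).  Clearing
 * the bit lets the clock run and setting it stops the clock, which is why
 * sh_clk_mstp_enable() clears clk->enable_bit and sh_clk_mstp_disable()
 * sets it.  The access width is selected by the CLK_ENABLE_REG_{8,16}BIT
 * flags, with 32-bit accesses as the default.
 */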
static int sh_clk_mstp_enable(struct clk *clk)
{
        if (clk->flags & CLK_ENABLE_REG_8BIT)
                iowrite8(ioread8(clk->mapped_reg) & ~(1 << clk->enable_bit),
                         clk->mapped_reg);
        else if (clk->flags & CLK_ENABLE_REG_16BIT)
                iowrite16(ioread16(clk->mapped_reg) & ~(1 << clk->enable_bit),
                          clk->mapped_reg);
        else
                iowrite32(ioread32(clk->mapped_reg) & ~(1 << clk->enable_bit),
                          clk->mapped_reg);

        return 0;
}

static void sh_clk_mstp_disable(struct clk *clk)
{
        if (clk->flags & CLK_ENABLE_REG_8BIT)
                iowrite8(ioread8(clk->mapped_reg) | (1 << clk->enable_bit),
                         clk->mapped_reg);
        else if (clk->flags & CLK_ENABLE_REG_16BIT)
                iowrite16(ioread16(clk->mapped_reg) | (1 << clk->enable_bit),
                          clk->mapped_reg);
        else
                iowrite32(ioread32(clk->mapped_reg) | (1 << clk->enable_bit),
                          clk->mapped_reg);
}

static struct sh_clk_ops sh_clk_mstp_clk_ops = {
        .enable = sh_clk_mstp_enable,
        .disable = sh_clk_mstp_disable,
        .recalc = followparent_recalc,
};

int __init sh_clk_mstp_register(struct clk *clks, int nr)
{
        struct clk *clkp;
        int ret = 0;
        int k;

        for (k = 0; !ret && (k < nr); k++) {
                clkp = clks + k;
                clkp->ops = &sh_clk_mstp_clk_ops;
                ret |= clk_register(clkp);
        }

        return ret;
}
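
/*
 * Illustrative sketch only -- the array name and contents below are
 * placeholders rather than something lifted from a real board file.
 * Platform code describes each gate with a struct clk (typically built
 * with the SH_CLK_MSTP32() helper from <linux/sh_clk.h>) and hands the
 * whole array to sh_clk_mstp_register() during clock initialization:
 *
 *        static struct clk mstp_clks[] = {
 *                ... one entry per module stop bit ...
 *        };
 *
 *        ret = sh_clk_mstp_register(mstp_clks, ARRAY_SIZE(mstp_clks));
 */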

static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
        return clk_rate_table_round(clk, clk->freq_table, rate);
}

static int sh_clk_div6_divisors[64] = {
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
        17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
        33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
        49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

static struct clk_div_mult_table sh_clk_div6_table = {
        .divisors = sh_clk_div6_divisors,
        .nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
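
/*
 * DIV6 divider encoding: the low six bits of the control register hold
 * the divisor minus one, so a field value of n selects
 * sh_clk_div6_divisors[n], i.e. a divide ratio of n + 1.  Bit 8 is the
 * stop bit used by the enable/disable callbacks below.
 */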

static unsigned long sh_clk_div6_recalc(struct clk *clk)
{
        struct clk_div_mult_table *table = &sh_clk_div6_table;
        unsigned int idx;

        clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
                             table, NULL);

        idx = ioread32(clk->mapped_reg) & 0x003f;

        return clk->freq_table[idx].frequency;
}

static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
        struct clk_div_mult_table *table = &sh_clk_div6_table;
        u32 value;
        int ret, i;

        if (!clk->parent_table || !clk->parent_num)
                return -EINVAL;

        /* Search for the parent in the parent table */
        for (i = 0; i < clk->parent_num; i++)
                if (clk->parent_table[i] == parent)
                        break;

        if (i == clk->parent_num)
                return -ENODEV;

        ret = clk_reparent(clk, parent);
        if (ret < 0)
                return ret;

        value = ioread32(clk->mapped_reg) &
                ~(((1 << clk->src_width) - 1) << clk->src_shift);

        iowrite32(value | (i << clk->src_shift), clk->mapped_reg);

        /* Rebuild the frequency table */
        clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
                             table, NULL);

        return 0;
}

static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
{
        unsigned long value;
        int idx;

        idx = clk_rate_table_find(clk, clk->freq_table, rate);
        if (idx < 0)
                return idx;

        value = ioread32(clk->mapped_reg);
        value &= ~0x3f;
        value |= idx;
        iowrite32(value, clk->mapped_reg);
        return 0;
}

static int sh_clk_div6_enable(struct clk *clk)
{
        unsigned long value;
        int ret;

        ret = sh_clk_div6_set_rate(clk, clk->rate);
        if (ret == 0) {
                value = ioread32(clk->mapped_reg);
                value &= ~0x100; /* clear stop bit to enable clock */
                iowrite32(value, clk->mapped_reg);
        }
        return ret;
}
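
/*
 * sh_clk_div6_enable() reprograms the divider from clk->rate before
 * clearing the stop bit because sh_clk_div6_disable() below deliberately
 * overwrites the divider field with 0x3f; the saved rate therefore has to
 * be restored on every enable.
 */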

static void sh_clk_div6_disable(struct clk *clk)
{
        unsigned long value;

        value = ioread32(clk->mapped_reg);
        value |= 0x100; /* stop clock */
        value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
        iowrite32(value, clk->mapped_reg);
}

static struct sh_clk_ops sh_clk_div6_clk_ops = {
        .recalc = sh_clk_div6_recalc,
        .round_rate = sh_clk_div_round_rate,
        .set_rate = sh_clk_div6_set_rate,
        .enable = sh_clk_div6_enable,
        .disable = sh_clk_div6_disable,
};

static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
        .recalc = sh_clk_div6_recalc,
        .round_rate = sh_clk_div_round_rate,
        .set_rate = sh_clk_div6_set_rate,
        .enable = sh_clk_div6_enable,
        .disable = sh_clk_div6_disable,
        .set_parent = sh_clk_div6_set_parent,
};
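
/*
 * The two ops tables above differ only in .set_parent: the reparent
 * variant is for DIV6 clocks that list their candidate parents in
 * clk->parent_table/parent_num and encode the selection in the
 * src_shift/src_width bit field of the control register.
 */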

static int __init sh_clk_init_parent(struct clk *clk)
{
        u32 val;

        if (clk->parent)
                return 0;

        if (!clk->parent_table || !clk->parent_num)
                return 0;

        if (!clk->src_width) {
                pr_err("sh_clk_init_parent: cannot select parent clock\n");
                return -EINVAL;
        }

        val = (ioread32(clk->mapped_reg) >> clk->src_shift);
        val &= (1 << clk->src_width) - 1;

        if (val >= clk->parent_num) {
                pr_err("sh_clk_init_parent: parent table size failed\n");
                return -EINVAL;
        }

        clk_reparent(clk, clk->parent_table[val]);
        if (!clk->parent) {
                pr_err("sh_clk_init_parent: unable to set parent\n");
                return -EINVAL;
        }

        return 0;
}

static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
                                           struct sh_clk_ops *ops)
{
        struct clk *clkp;
        void *freq_table;
        int nr_divs = sh_clk_div6_table.nr_divisors;
        int freq_table_size = sizeof(struct cpufreq_frequency_table);
        int ret = 0;
        int k;

        freq_table_size *= (nr_divs + 1);
        freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
        if (!freq_table) {
                pr_err("sh_clk_div6_register: unable to alloc memory\n");
                return -ENOMEM;
        }

        for (k = 0; !ret && (k < nr); k++) {
                clkp = clks + k;

                clkp->ops = ops;
                clkp->freq_table = freq_table + (k * freq_table_size);
                clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
                ret = clk_register(clkp);
                if (ret < 0)
                        break;

                ret = sh_clk_init_parent(clkp);
        }

        return ret;
}

int __init sh_clk_div6_register(struct clk *clks, int nr)
{
        return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
}

int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
        return sh_clk_div6_register_ops(clks, nr,
                                        &sh_clk_div6_reparent_clk_ops);
}
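
/*
 * DIV4 clocks mirror the DIV6 handling above, but the divisor table is
 * SoC specific: it is supplied through a struct clk_div4_table hung off
 * clk->priv, which may also carry a ->kick() callback that is run after
 * the divider field has been rewritten (typically to make the hardware
 * latch the new setting).
 */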

static unsigned long sh_clk_div4_recalc(struct clk *clk)
{
        struct clk_div4_table *d4t = clk->priv;
        struct clk_div_mult_table *table = d4t->div_mult_table;
        unsigned int idx;

        clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
                             table, &clk->arch_flags);

        idx = (ioread32(clk->mapped_reg) >> clk->enable_bit) & 0x000f;

        return clk->freq_table[idx].frequency;
}

static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
        struct clk_div4_table *d4t = clk->priv;
        struct clk_div_mult_table *table = d4t->div_mult_table;
        u32 value;
        int ret;

        /*
         * We really need a better way to determine the parent index; for
         * now assume that an internal parent comes with CLK_ENABLE_ON_INIT
         * set, while no CLK_ENABLE_ON_INIT means an external clock.
         */

        if (parent->flags & CLK_ENABLE_ON_INIT)
                value = ioread32(clk->mapped_reg) & ~(1 << 7);
        else
                value = ioread32(clk->mapped_reg) | (1 << 7);

        ret = clk_reparent(clk, parent);
        if (ret < 0)
                return ret;

        iowrite32(value, clk->mapped_reg);

        /* Rebuild the frequency table */
        clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
                             table, &clk->arch_flags);

        return 0;
}

static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
{
        struct clk_div4_table *d4t = clk->priv;
        unsigned long value;
        int idx = clk_rate_table_find(clk, clk->freq_table, rate);
        if (idx < 0)
                return idx;

        value = ioread32(clk->mapped_reg);
        value &= ~(0xf << clk->enable_bit);
        value |= (idx << clk->enable_bit);
        iowrite32(value, clk->mapped_reg);

        if (d4t->kick)
                d4t->kick(clk);

        return 0;
}

static int sh_clk_div4_enable(struct clk *clk)
{
        iowrite32(ioread32(clk->mapped_reg) & ~(1 << 8), clk->mapped_reg);
        return 0;
}

static void sh_clk_div4_disable(struct clk *clk)
{
        iowrite32(ioread32(clk->mapped_reg) | (1 << 8), clk->mapped_reg);
}
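
/*
 * As with DIV6, bit 8 of the mapped DIV4 register acts as the stop bit:
 * sh_clk_div4_enable() clears it to let the clock run and
 * sh_clk_div4_disable() sets it to stop the clock.
 */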

static struct sh_clk_ops sh_clk_div4_clk_ops = {
        .recalc = sh_clk_div4_recalc,
        .set_rate = sh_clk_div4_set_rate,
        .round_rate = sh_clk_div_round_rate,
};

static struct sh_clk_ops sh_clk_div4_enable_clk_ops = {
        .recalc = sh_clk_div4_recalc,
        .set_rate = sh_clk_div4_set_rate,
        .round_rate = sh_clk_div_round_rate,
        .enable = sh_clk_div4_enable,
        .disable = sh_clk_div4_disable,
};

static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
        .recalc = sh_clk_div4_recalc,
        .set_rate = sh_clk_div4_set_rate,
        .round_rate = sh_clk_div_round_rate,
        .enable = sh_clk_div4_enable,
        .disable = sh_clk_div4_disable,
        .set_parent = sh_clk_div4_set_parent,
};

static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
                        struct clk_div4_table *table, struct sh_clk_ops *ops)
{
        struct clk *clkp;
        void *freq_table;
        int nr_divs = table->div_mult_table->nr_divisors;
        int freq_table_size = sizeof(struct cpufreq_frequency_table);
        int ret = 0;
        int k;

        freq_table_size *= (nr_divs + 1);
        freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
        if (!freq_table) {
                pr_err("sh_clk_div4_register: unable to alloc memory\n");
                return -ENOMEM;
        }

        for (k = 0; !ret && (k < nr); k++) {
                clkp = clks + k;

                clkp->ops = ops;
                clkp->priv = table;

                clkp->freq_table = freq_table + (k * freq_table_size);
                clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

                ret = clk_register(clkp);
        }

        return ret;
}
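
/*
 * Illustrative sketch only -- the names below are placeholders, not taken
 * from a real SoC clock file.  A SoC supplies its divisor table (and an
 * optional kick callback) through struct clk_div4_table and registers its
 * DIV4 clocks against it:
 *
 *        static struct clk_div_mult_table div4_div_mult_table = {
 *                .divisors = divisors,
 *                .nr_divisors = ARRAY_SIZE(divisors),
 *        };
 *
 *        static struct clk_div4_table div4_table = {
 *                .div_mult_table = &div4_div_mult_table,
 *        };
 *
 *        ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
 *                                   &div4_table);
 */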

int __init sh_clk_div4_register(struct clk *clks, int nr,
                                struct clk_div4_table *table)
{
        return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
}

int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
                                       struct clk_div4_table *table)
{
        return sh_clk_div4_register_ops(clks, nr, table,
                                        &sh_clk_div4_enable_clk_ops);
}

int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
                                         struct clk_div4_table *table)
{
        return sh_clk_div4_register_ops(clks, nr, table,
                                        &sh_clk_div4_reparent_clk_ops);
409}