/*
 * linux/arch/arm/mach-omap1/clock.c
 *
 * Copyright (C) 2004 - 2005, 2009-2010 Nokia Corporation
 * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified to use omap shared clock framework by
 * Tony Lindgren <tony@atomide.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>

#include <asm/mach-types.h>

#include <plat/cpu.h>
#include <plat/usb.h>

#include <mach/hardware.h>

#include "../plat-omap/sram.h"

#include "iomap.h"
#include "clock.h"
#include "opp.h"

__u32 arm_idlect1_mask;
struct clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);

/*
 * Omap1 specific clock functions
 */

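/*
 * UART functional clock runs at either 48 MHz or 12 MHz, depending on the
 * selection flag in the clock's enable register.
 */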
unsigned long omap1_uart_recalc(struct clk *clk)
{
	unsigned int val = __raw_readl(clk->enable_reg);
	return val & clk->enable_bit ? 48000000 : 12000000;
}

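/*
 * SoSSI clock rate is the parent rate divided by (field + 1), where the
 * field is the 3-bit divider at bit 17 of MOD_CONF_CTRL_1.
 */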
unsigned long omap1_sossi_recalc(struct clk *clk)
{
	u32 div = omap_readl(MOD_CONF_CTRL_1);

	div = (div >> 17) & 0x7;
	div++;

	return clk->parent->rate / div;
}

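/*
 * The allow/deny idle helpers keep a per-clock "no idle" count and update
 * arm_idlect1_mask accordingly: a clock's ARM_IDLECT1 bit is set in the
 * mask (idle allowed) only while nobody has denied idling for it.
 */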
static void omap1_clk_allow_idle(struct clk *clk)
{
	struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count > 0 && !(--iclk->no_idle_count))
		arm_idlect1_mask |= 1 << iclk->idlect_shift;
}

static void omap1_clk_deny_idle(struct clk *clk)
{
	struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count++ == 0)
		arm_idlect1_mask &= ~(1 << iclk->idlect_shift);
}

static __u16 verify_ckctl_value(__u16 newval)
{
	/* This function checks for the following limitations set
	 * by the hardware (all conditions must be true):
	 * DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2
	 * ARM_CK >= TC_CK
	 * DSP_CK >= TC_CK
	 * DSPMMU_CK >= TC_CK
	 *
	 * In addition, the following rules are enforced:
	 * LCD_CK <= TC_CK
	 * ARMPER_CK <= TC_CK
	 *
	 * However, maximum frequencies are not checked for!
	 */
	__u8 per_exp;
	__u8 lcd_exp;
	__u8 arm_exp;
	__u8 dsp_exp;
	__u8 tc_exp;
	__u8 dspmmu_exp;

	per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
	lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
	arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
	dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
	tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
	dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;

	if (dspmmu_exp < dsp_exp)
		dspmmu_exp = dsp_exp;
	if (dspmmu_exp > dsp_exp + 1)
		dspmmu_exp = dsp_exp + 1;
	if (tc_exp < arm_exp)
		tc_exp = arm_exp;
	if (tc_exp < dspmmu_exp)
		tc_exp = dspmmu_exp;
	if (tc_exp > lcd_exp)
		lcd_exp = tc_exp;
	if (tc_exp > per_exp)
		per_exp = tc_exp;

	newval &= 0xf000;
	newval |= per_exp << CKCTL_PERDIV_OFFSET;
	newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
	newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
	newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
	newval |= tc_exp << CKCTL_TCDIV_OFFSET;
	newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;

	return newval;
}

static int calc_dsor_exp(struct clk *clk, unsigned long rate)
{
	/* Note: If target frequency is too low, this function will return 4,
	 * which is an invalid value. Caller must check for this value and act
	 * accordingly.
	 *
	 * Note: This function does not check for the following limitations set
	 * by the hardware (all conditions must be true):
	 * DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2
	 * ARM_CK >= TC_CK
	 * DSP_CK >= TC_CK
	 * DSPMMU_CK >= TC_CK
	 */
	unsigned long realrate;
	struct clk *parent;
	unsigned dsor_exp;

	parent = clk->parent;
	if (unlikely(parent == NULL))
		return -EIO;

	realrate = parent->rate;
	for (dsor_exp = 0; dsor_exp < 4; dsor_exp++) {
		if (realrate <= rate)
			break;

		realrate /= 2;
	}

	return dsor_exp;
}

unsigned long omap1_ckctl_recalc(struct clk *clk)
{
	/* Calculate divisor encoded as 2-bit exponent */
	int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));

	return clk->parent->rate / dsor;
}

unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk)
{
	int dsor;

	/* Calculate divisor encoded as 2-bit exponent
	 *
	 * The clock control bits are in DSP domain,
	 * so api_ck is needed for access.
	 * Note that DSP_CKCTL virt addr = phys addr, so
	 * we must use __raw_readw() instead of omap_readw().
	 */
	omap1_clk_enable(api_ck_p);
	dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
	omap1_clk_disable(api_ck_p);

	return clk->parent->rate / dsor;
}

/* MPU virtual clock functions */
int omap1_select_table_rate(struct clk *clk, unsigned long rate)
{
	/* Find the highest supported frequency <= rate and switch to it */
	struct mpu_rate *ptr;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		/* Can check only after xtal frequency check */
		if (ptr->rate <= rate)
			break;
	}

	if (!ptr->rate)
		return -EINVAL;

	/*
	 * In most cases we should not need to reprogram DPLL.
	 * Reprogramming the DPLL is tricky, it must be done from SRAM.
	 */
	omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);

	/* XXX Do we need to recalculate the tree below DPLL1 at this point? */
	ck_dpll1_p->rate = ptr->pll_rate;

	return 0;
}

int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
{
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(clk, rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	regval = __raw_readw(DSP_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	__raw_writew(regval, DSP_CKCTL);
	clk->rate = clk->parent->rate / (1 << dsor_exp);

	return 0;
}

long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
	int dsor_exp = calc_dsor_exp(clk, rate);

	if (dsor_exp < 0)
		return dsor_exp;
	if (dsor_exp > 3)
		dsor_exp = 3;
	return clk->parent->rate / (1 << dsor_exp);
}

int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(clk, rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	regval = omap_readw(ARM_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	regval = verify_ckctl_value(regval);
	omap_writew(regval, ARM_CKCTL);
	clk->rate = clk->parent->rate / (1 << dsor_exp);
	return 0;
}

long omap1_round_to_table_rate(struct clk *clk, unsigned long rate)
{
	/* Find the highest supported frequency <= rate */
	struct mpu_rate *ptr;
	long highest_rate;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	highest_rate = -EINVAL;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		highest_rate = ptr->rate;

		/* Can check only after xtal frequency check */
		if (ptr->rate <= rate)
			break;
	}

	return highest_rate;
}

static unsigned calc_ext_dsor(unsigned long rate)
{
	unsigned dsor;

	/* MCLK and BCLK divisor selection is not linear:
	 * freq = 96MHz / dsor
	 *
	 * RATIO_SEL range: dsor <-> RATIO_SEL
	 * 0..6: (RATIO_SEL+2) <-> (dsor-2)
	 * 6..48: (8+(RATIO_SEL-6)*2) <-> ((dsor-8)/2+6)
	 * Minimum dsor is 2 and maximum is 96. Odd divisors starting from 9
	 * can not be used.
	 */
	for (dsor = 2; dsor < 96; ++dsor) {
		if ((dsor & 1) && dsor > 8)
			continue;
		if (rate >= 96000000 / dsor)
			break;
	}
	return dsor;
}

/* XXX Only needed on 1510 */
int omap1_set_uart_rate(struct clk *clk, unsigned long rate)
{
	unsigned int val;

	val = __raw_readl(clk->enable_reg);
	if (rate == 12000000)
		val &= ~(1 << clk->enable_bit);
	else if (rate == 48000000)
		val |= (1 << clk->enable_bit);
	else
		return -EINVAL;
	__raw_writel(val, clk->enable_reg);
	clk->rate = rate;

	return 0;
}

/* External clock (MCLK & BCLK) functions */
int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate)
{
	unsigned dsor;
	__u16 ratio_bits;

	dsor = calc_ext_dsor(rate);
	clk->rate = 96000000 / dsor;
	if (dsor > 8)
		ratio_bits = ((dsor - 8) / 2 + 6) << 2;
	else
		ratio_bits = (dsor - 2) << 2;

	ratio_bits |= __raw_readw(clk->enable_reg) & ~0xfd;
	__raw_writew(ratio_bits, clk->enable_reg);

	return 0;
}

int omap1_set_sossi_rate(struct clk *clk, unsigned long rate)
{
	u32 l;
	int div;
	unsigned long p_rate;

	p_rate = clk->parent->rate;
	/* Round towards slower frequency */
	div = (p_rate + rate - 1) / rate;
	div--;
	if (div < 0 || div > 7)
		return -EINVAL;

	l = omap_readl(MOD_CONF_CTRL_1);
	l &= ~(7 << 17);
	l |= div << 17;
	omap_writel(l, MOD_CONF_CTRL_1);

	clk->rate = p_rate / (div + 1);

	return 0;
}

long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate)
{
	return 96000000 / calc_ext_dsor(rate);
}

void omap1_init_ext_clk(struct clk *clk)
{
	unsigned dsor;
	__u16 ratio_bits;

	/* Determine current rate and ensure clock is based on 96MHz APLL */
	ratio_bits = __raw_readw(clk->enable_reg) & ~1;
	__raw_writew(ratio_bits, clk->enable_reg);

	ratio_bits = (ratio_bits & 0xfc) >> 2;
	if (ratio_bits > 6)
		dsor = (ratio_bits - 6) * 2 + 8;
	else
		dsor = ratio_bits + 2;

	clk->rate = 96000000 / dsor;
}

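/*
 * Enable a clock, enabling its parent first so the clock never runs with a
 * disabled parent.  The use count is taken up front and rolled back on error.
 */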
int omap1_clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = omap1_clk_enable(clk->parent);
			if (ret)
				goto err;

			if (clk->flags & CLOCK_NO_IDLE_PARENT)
				omap1_clk_deny_idle(clk->parent);
		}

		ret = clk->ops->enable(clk);
		if (ret) {
			if (clk->parent)
				omap1_clk_disable(clk->parent);
			goto err;
		}
	}
	return ret;

err:
	clk->usecount--;
	return ret;
}

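/*
 * Disable a clock once its use count drops to zero, then release the
 * reference on its parent and let the parent idle again if it was held.
 */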
void omap1_clk_disable(struct clk *clk)
{
	if (clk->usecount > 0 && !(--clk->usecount)) {
		clk->ops->disable(clk);
		if (likely(clk->parent)) {
			omap1_clk_disable(clk->parent);
			if (clk->flags & CLOCK_NO_IDLE_PARENT)
				omap1_clk_allow_idle(clk->parent);
		}
	}
}

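/*
 * Generic enable/disable: set or clear enable_bit in the clock's enable
 * register, using a 32-bit or 16-bit access depending on ENABLE_REG_32BIT.
 */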
static int omap1_clk_enable_generic(struct clk *clk)
{
	__u16 regval16;
	__u32 regval32;

	if (unlikely(clk->enable_reg == NULL)) {
		printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
		       clk->name);
		return -EINVAL;
	}

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 |= (1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 |= (1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}

	return 0;
}

static void omap1_clk_disable_generic(struct clk *clk)
{
	__u16 regval16;
	__u32 regval32;

	if (clk->enable_reg == NULL)
		return;

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 &= ~(1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 &= ~(1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}
}

const struct clkops clkops_generic = {
	.enable		= omap1_clk_enable_generic,
	.disable	= omap1_clk_disable_generic,
};

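/*
 * Clocks in the DSP domain are reached through api_ck: it is enabled for
 * the duration of the register access and released again afterwards.
 */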
static int omap1_clk_enable_dsp_domain(struct clk *clk)
{
	int retval;

	retval = omap1_clk_enable(api_ck_p);
	if (!retval) {
		retval = omap1_clk_enable_generic(clk);
		omap1_clk_disable(api_ck_p);
	}

	return retval;
}

static void omap1_clk_disable_dsp_domain(struct clk *clk)
{
	if (omap1_clk_enable(api_ck_p) == 0) {
		omap1_clk_disable_generic(clk);
		omap1_clk_disable(api_ck_p);
	}
}

const struct clkops clkops_dspck = {
	.enable		= omap1_clk_enable_dsp_domain,
	.disable	= omap1_clk_disable_dsp_domain,
};

/* XXX SYSC register handling does not belong in the clock framework */
static int omap1_clk_enable_uart_functional_16xx(struct clk *clk)
{
	int ret;
	struct uart_clk *uclk;

	ret = omap1_clk_enable_generic(clk);
	if (ret == 0) {
		/* Set smart idle acknowledgement mode */
		uclk = (struct uart_clk *)clk;
		omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8,
			    uclk->sysc_addr);
	}

	return ret;
}

/* XXX SYSC register handling does not belong in the clock framework */
static void omap1_clk_disable_uart_functional_16xx(struct clk *clk)
{
	struct uart_clk *uclk;

	/* Set force idle acknowledgement mode */
	uclk = (struct uart_clk *)clk;
	omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);

	omap1_clk_disable_generic(clk);
}

/* XXX SYSC register handling does not belong in the clock framework */
const struct clkops clkops_uart_16xx = {
	.enable		= omap1_clk_enable_uart_functional_16xx,
	.disable	= omap1_clk_disable_uart_functional_16xx,
};

long omap1_clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (clk->round_rate != NULL)
		return clk->round_rate(clk, rate);

	return clk->rate;
}

int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EINVAL;

	if (clk->set_rate)
		ret = clk->set_rate(clk, rate);
	return ret;
}

/*
 * Omap1 clock reset and init functions
 */

#ifdef CONFIG_OMAP_RESET_CLOCKS

void omap1_clk_disable_unused(struct clk *clk)
{
	__u32 regval32;

	/* Clocks in the DSP domain need api_ck. Just assume bootloader
	 * has not enabled any DSP clocks */
	if (clk->enable_reg == DSP_IDLECT2) {
		pr_info("Skipping reset check for DSP domain clock \"%s\"\n",
			clk->name);
		return;
	}

	/* Is the clock already disabled? */
	if (clk->flags & ENABLE_REG_32BIT)
		regval32 = __raw_readl(clk->enable_reg);
	else
		regval32 = __raw_readw(clk->enable_reg);

	if ((regval32 & (1 << clk->enable_bit)) == 0)
		return;

	printk(KERN_INFO "Disabling unused clock \"%s\"... ", clk->name);
	clk->ops->disable(clk);
	printk(" done\n");
}

#endif

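/*
 * The clk_*() functions below implement the <linux/clk.h> API for OMAP1.
 * The enable/disable and rate operations serialize on clockfw_lock.
 */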
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->usecount == 0) {
		pr_err("Trying to disable clock %s with 0 usecount\n",
		       clk->name);
		WARN_ON(1);
		goto out;
	}

	omap1_clk_disable(clk);

out:
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long flags;
	unsigned long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = clk->rate;
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_get_rate);

/*
 * Optional clock functions defined in include/linux/clk.h
 */

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_round_rate(clk, rate);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_set_rate(clk, rate);
	if (ret == 0)
		propagate_rate(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	WARN_ONCE(1, "clk_set_parent() not implemented for OMAP1\n");

	return -EINVAL;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);

/*
 * OMAP specific clock functions shared between omap1 and omap2
 */

int __initdata mpurate;

/*
 * By default we use the rate set by the bootloader.
 * You can override this with the mpurate= cmdline option.
 */
static int __init omap_clk_setup(char *str)
{
	get_option(&str, &mpurate);

	if (!mpurate)
		return 1;

	if (mpurate < 1000)
		mpurate *= 1000000;

	return 1;
}
__setup("mpurate=", omap_clk_setup);

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent->rate;
}

/*
 * Used for clocks that have the same value as the parent clock,
 * divided by some factor
 */
unsigned long omap_fixed_divisor_recalc(struct clk *clk)
{
	WARN_ON(!clk->fixed_div);

	return clk->parent->rate / clk->fixed_div;
}

void clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/* now do the debugfs renaming to reattach the child
	   to the proper parent */
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}

/**
 * clk_preinit - initialize any fields in the struct clk before clk init
 * @clk: struct clk * to initialize
 *
 * Initialize any struct clk fields needed before normal clk initialization
 * can run. No return value.
 */
void clk_preinit(struct clk *clk)
{
	INIT_LIST_HEAD(&clk->children);
}

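/*
 * Add a clock to the global clock list and link it under its parent (or to
 * the root list if it has none); clocks that are already registered are
 * silently ignored.
 */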
int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clocks_mutex);
	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clocks);
	if (clk->init)
		clk->init(clk);
	mutex_unlock(&clocks_mutex);

	return 0;
}
EXPORT_SYMBOL(clk_register);

void clk_unregister(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;

	mutex_lock(&clocks_mutex);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clocks, node)
		if (clkp->flags & ENABLE_ON_INIT)
			clk_enable(clkp);
}

/**
 * omap_clk_get_by_name - locate OMAP struct clk by its name
 * @name: name of the struct clk to locate
 *
 * Locate an OMAP struct clk by its name. Assumes that struct clk
 * names are unique. Returns NULL if not found or a pointer to the
 * struct clk if found.
 */
struct clk *omap_clk_get_by_name(const char *name)
{
	struct clk *c;
	struct clk *ret = NULL;

	mutex_lock(&clocks_mutex);

	list_for_each_entry(c, &clocks, node) {
		if (!strcmp(c->name, name)) {
			ret = c;
			break;
		}
	}

	mutex_unlock(&clocks_mutex);

	return ret;
}

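/*
 * Toggle hardware autoidle for every registered clock that provides
 * allow_idle/deny_idle hooks.
 */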
int omap_clk_enable_autoidle_all(void)
{
	struct clk *c;
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);

	list_for_each_entry(c, &clocks, node)
		if (c->ops->allow_idle)
			c->ops->allow_idle(c);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}

int omap_clk_disable_autoidle_all(void)
{
	struct clk *c;
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);

	list_for_each_entry(c, &clocks, node)
		if (c->ops->deny_idle)
			c->ops->deny_idle(c);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}

/*
 * Low level helpers
 */
static int clkll_enable_null(struct clk *clk)
{
	return 0;
}

static void clkll_disable_null(struct clk *clk)
{
}

const struct clkops clkops_null = {
	.enable		= clkll_enable_null,
	.disable	= clkll_disable_null,
};

/*
 * Dummy clock
 *
 * Used for clock aliases that are needed on some OMAPs, but not others
 */
struct clk dummy_ck = {
	.name	= "dummy",
	.ops	= &clkops_null,
};

#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
	struct clk *ck;
	unsigned long flags;

	pr_info("clock: disabling unused clocks to save power\n");

	spin_lock_irqsave(&clockfw_lock, flags);
	list_for_each_entry(ck, &clocks, node) {
		if (ck->ops == &clkops_null)
			continue;

		if (ck->usecount > 0 || !ck->enable_reg)
			continue;

		omap1_clk_disable_unused(ck);
	}
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
late_initcall(clk_disable_unused);
late_initcall(omap_clk_enable_autoidle_all);
#endif

#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
/*
 * debugfs support to trace clock tree hierarchy and attributes
 */

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *clk_debugfs_root;

static int clk_dbg_show_summary(struct seq_file *s, void *unused)
{
	struct clk *c;
	struct clk *pa;

	mutex_lock(&clocks_mutex);
	seq_printf(s, "%-30s %-30s %-10s %s\n",
		   "clock-name", "parent-name", "rate", "use-count");

	list_for_each_entry(c, &clocks, node) {
		pa = c->parent;
		seq_printf(s, "%-30s %-30s %-10lu %d\n",
			   c->name, pa ? pa->name : "none", c->rate,
			   c->usecount);
	}
	mutex_unlock(&clocks_mutex);

	return 0;
}

static int clk_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dbg_show_summary, inode->i_private);
}

static const struct file_operations debug_clock_fops = {
	.open		= clk_dbg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

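/*
 * Create one debugfs directory per clock, nested under its parent's
 * directory, with read-only usecount, rate and flags entries.
 */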
static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d;
	struct clk *pa = c->parent;

	d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dent = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	debugfs_remove_recursive(c->dent);
	return err;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dent) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dent) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clocks, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}

	d = debugfs_create_file("summary", S_IRUGO,
				d, NULL, &debug_clock_fops);
	if (!d)
		return -ENOMEM;

	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);

#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */