/*
 * Clock manipulation routines for Freescale STMP37XX/STMP378X
 *
 * Author: Vitaly Wool <vital@embeddedalley.com>
 *
 * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
 */

/*
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#define DEBUG
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/io.h>

#include <asm/mach-types.h>
#include <asm/clkdev.h>
#include <mach/platform.h>
#include <mach/regs-clkctrl.h>

#include "clock.h"

static DEFINE_SPINLOCK(clocks_lock);

static struct clk osc_24M;
static struct clk pll_clk;
static struct clk cpu_clk;
static struct clk hclk;

static int propagate_rate(struct clk *);

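/*
 * Clocks that have a busy_reg/busy_bit are polled after a divider
 * update: the busy bit is expected to read back as 1 until the new
 * divisor has taken effect.
 */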
static inline int clk_is_busy(struct clk *clk)
{
	return __raw_readl(clk->busy_reg) & (1 << clk->busy_bit);
}

static inline int clk_good(struct clk *clk)
{
	return clk && !IS_ERR(clk) && clk->ops;
}

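/*
 * Generic gate control. For most of these clocks the register bit is a
 * clock gate (enable_negate set), so enabling means clearing the bit
 * and disabling means setting it; enable_wait adds a settling delay in
 * microseconds after ungating.
 */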
static int std_clk_enable(struct clk *clk)
{
	if (clk->enable_reg) {
		u32 clk_reg = __raw_readl(clk->enable_reg);
		if (clk->enable_negate)
			clk_reg &= ~(1 << clk->enable_shift);
		else
			clk_reg |= (1 << clk->enable_shift);
		__raw_writel(clk_reg, clk->enable_reg);
		if (clk->enable_wait)
			udelay(clk->enable_wait);
		return 0;
	} else
		return -EINVAL;
}

static int std_clk_disable(struct clk *clk)
{
	if (clk->enable_reg) {
		u32 clk_reg = __raw_readl(clk->enable_reg);
		if (clk->enable_negate)
			clk_reg |= (1 << clk->enable_shift);
		else
			clk_reg &= ~(1 << clk->enable_shift);
		__raw_writel(clk_reg, clk->enable_reg);
		return 0;
	} else
		return -EINVAL;
}

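/*
 * ref_io runs at parent (PLL) rate * 18 / IOFRAC, with IOFRAC limited
 * to 18..35. The fraction is rounded up so the resulting rate never
 * exceeds the one requested, then the busy bit is polled until the new
 * value has been taken over.
 */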
static int io_set_rate(struct clk *clk, u32 rate)
{
	u32 reg_frac, clkctrl_frac;
	int i, ret = 0, mask = 0x1f;

	clkctrl_frac = (clk->parent->rate * 18 + rate - 1) / rate;

	if (clkctrl_frac < 18 || clkctrl_frac > 35) {
		ret = -EINVAL;
		goto out;
	}

	reg_frac = __raw_readl(clk->scale_reg);
	reg_frac &= ~(mask << clk->scale_shift);
	__raw_writel(reg_frac | (clkctrl_frac << clk->scale_shift),
			clk->scale_reg);
	if (clk->busy_reg) {
		for (i = 10000; i; i--)
			if (!clk_is_busy(clk))
				break;
		if (!i)
			ret = -ETIMEDOUT;
		else
			ret = 0;
	}
out:
	return ret;
}

static long io_get_rate(struct clk *clk)
{
	long rate = clk->parent->rate * 18;
	int mask = 0x1f;

	rate /= (__raw_readl(clk->scale_reg) >> clk->scale_shift) & mask;
	clk->rate = rate;

	return rate;
}

static long per_get_rate(struct clk *clk)
{
	long rate = clk->parent->rate;
	long div;
	const int mask = 0xff;

	if (clk->enable_reg &&
			!(__raw_readl(clk->enable_reg) & clk->enable_shift))
		clk->rate = 0;
	else {
		div = (__raw_readl(clk->scale_reg) >> clk->scale_shift) & mask;
		if (div)
			rate /= div;
		clk->rate = rate;
	}

	return clk->rate;
}

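/*
 * Peripheral dividers (SSP, GPMI, SAIF): an 8-bit integer divider off
 * the parent clock, rounded up so the result does not exceed the
 * requested rate. The register write is retried up to 10 times,
 * polling the busy bit after each attempt, before giving up with
 * -ETIMEDOUT.
 */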
static int per_set_rate(struct clk *clk, u32 rate)
{
	int ret = -EINVAL;
	int div = (clk->parent->rate + rate - 1) / rate;
	u32 reg_frac;
	const int mask = 0xff;
	int try = 10;
	int i = -1;

	if (div == 0 || div > mask)
		goto out;

	reg_frac = __raw_readl(clk->scale_reg);
	reg_frac &= ~(mask << clk->scale_shift);

	while (try--) {
		__raw_writel(reg_frac | (div << clk->scale_shift),
				clk->scale_reg);

		if (clk->busy_reg) {
			for (i = 10000; i; i--)
				if (!clk_is_busy(clk))
					break;
		}
		if (i)
			break;
	}

	if (!i)
		ret = -ETIMEDOUT;
	else
		ret = 0;

out:
	if (ret != 0)
		printk(KERN_ERR "%s: error %d\n", __func__, ret);
	return ret;
}

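/*
 * The LCDIF pixel clock is derived from the PLL through the PIXFRAC
 * fractional divider (ref_pix) followed by the integer DIV field in
 * HW_CLKCTRL_PIX; both fields are read back here to recover the rate.
 */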
static long lcdif_get_rate(struct clk *clk)
{
	long rate = clk->parent->rate;
	long div;
	const int mask = 0xff;

	div = (__raw_readl(clk->scale_reg) >> clk->scale_shift) & mask;
	if (div) {
		rate /= div;
		div = (__raw_readl(REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC) &
			BM_CLKCTRL_FRAC_PIXFRAC) >> BP_CLKCTRL_FRAC_PIXFRAC;
		rate /= div;
	}
	clk->rate = rate;

	return rate;
}

static int lcdif_set_rate(struct clk *clk, u32 rate)
{
	int ret = 0;
	/*
	 * On 3700, we can get most timings exact by modifying ref_pix
	 * and the divider, but keeping the phase timings at 1 (2
	 * phases per cycle).
	 *
	 * ref_pix can be between 480e6*18/35=246.9MHz and 480e6*18/18=480MHz,
	 * which is between 18/(18*480e6)=2.084ns and 35/(18*480e6)=4.050ns.
	 *
	 * ns_cycle >= 2*18e3/(18*480) = 25/6
	 * ns_cycle <= 2*35e3/(18*480) = 875/108
	 *
	 * Multiply the ns_cycle by 'div' to lengthen it until it fits the
	 * bounds. This is the divider we'll use after ref_pix.
	 *
	 * 6 * ns_cycle >= 25 * div
	 * 108 * ns_cycle <= 875 * div
	 */
	u32 ns_cycle = 1000000 / rate;
	u32 div, reg_val;
	u32 lowest_result = (u32) -1;
	u32 lowest_div = 0, lowest_fracdiv = 0;

	for (div = 1; div < 256; ++div) {
		u32 fracdiv;
		u32 ps_result;
		int lower_bound = 6 * ns_cycle >= 25 * div;
		int upper_bound = 108 * ns_cycle <= 875 * div;
		if (!lower_bound)
			break;
		if (!upper_bound)
			continue;
		/*
		 * Found a matching div. Calculate fractional divider needed,
		 * rounded up.
		 */
		fracdiv = ((clk->parent->rate / 1000 * 18 / 2) *
				ns_cycle + 1000 * div - 1) /
				(1000 * div);
		if (fracdiv < 18 || fracdiv > 35) {
			ret = -EINVAL;
			goto out;
		}
		/* Calculate the actual cycle time this results in */
		ps_result = 6250 * div * fracdiv / 27;

		/* Use the fastest result that doesn't break ns_cycle */
		if (ps_result <= lowest_result) {
			lowest_result = ps_result;
			lowest_div = div;
			lowest_fracdiv = fracdiv;
		}
	}

	if (div >= 256 || lowest_result == (u32) -1) {
		ret = -EINVAL;
		goto out;
	}
	pr_debug("Programming PFD=%u,DIV=%u ref_pix=%uMHz "
			"PIXCLK=%uMHz cycle=%u.%03uns\n",
			lowest_fracdiv, lowest_div,
			480*18/lowest_fracdiv, 480*18/lowest_fracdiv/lowest_div,
			lowest_result / 1000, lowest_result % 1000);

	/* Program ref_pix phase fractional divider */
	reg_val = __raw_readl(REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC);
	reg_val &= ~BM_CLKCTRL_FRAC_PIXFRAC;
	reg_val |= BF(lowest_fracdiv, CLKCTRL_FRAC_PIXFRAC);
	__raw_writel(reg_val, REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC);

	/* Ungate PFD */
	stmp3xxx_clearl(BM_CLKCTRL_FRAC_CLKGATEPIX,
			REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC);

	/* Program pix divider */
	reg_val = __raw_readl(clk->scale_reg);
	reg_val &= ~(BM_CLKCTRL_PIX_DIV | BM_CLKCTRL_PIX_CLKGATE);
	reg_val |= BF(lowest_div, CLKCTRL_PIX_DIV);
	__raw_writel(reg_val, clk->scale_reg);

	/* Wait for divider update */
	if (clk->busy_reg) {
		int i;
		for (i = 10000; i; i--)
			if (!clk_is_busy(clk))
				break;
		if (!i) {
			ret = -ETIMEDOUT;
			goto out;
		}
	}

	/* Switch to ref_pix source */
	reg_val = __raw_readl(REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ);
	reg_val &= ~BM_CLKCTRL_CLKSEQ_BYPASS_PIX;
	__raw_writel(reg_val, REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ);

out:
	return ret;
}

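/*
 * Reprogramming the CPU clock: search the (fraction, divider) pairs for
 * the combination whose resulting rate (PLL * 18 / frac / div) is
 * closest to the request, park the CPU on the 24 MHz crystal while the
 * dividers are rewritten, then switch back to the PLL-derived path.
 */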
static int cpu_set_rate(struct clk *clk, u32 rate)
{
	u32 reg_val;

	if (rate < 24000)
		return -EINVAL;
	else if (rate == 24000) {
		/* switch to the 24M source */
		clk_set_parent(clk, &osc_24M);
	} else {
		int i;
		u32 clkctrl_cpu = 1;
		u32 c = clkctrl_cpu;
		u32 clkctrl_frac = 1;
		u32 val;
		for ( ; c < 0x40; c++) {
			u32 f = (pll_clk.rate*18/c + rate/2) / rate;
			int s1, s2;

			if (f < 18 || f > 35)
				continue;
			s1 = pll_clk.rate*18/clkctrl_frac/clkctrl_cpu - rate;
			s2 = pll_clk.rate*18/c/f - rate;
			pr_debug("%s: s1 %d, s2 %d\n", __func__, s1, s2);
			if (abs(s1) > abs(s2)) {
				clkctrl_cpu = c;
				clkctrl_frac = f;
			}
			if (s2 == 0)
				break;
		}
		pr_debug("%s: clkctrl_cpu %d, clkctrl_frac %d\n", __func__,
				clkctrl_cpu, clkctrl_frac);
		if (c == 0x40) {
			int d = pll_clk.rate*18/clkctrl_frac/clkctrl_cpu -
				rate;
			if (abs(d) > 100 ||
			    clkctrl_frac < 18 || clkctrl_frac > 35)
				return -EINVAL;
		}

		/* 4.6.2 */
		val = __raw_readl(clk->scale_reg);
		val &= ~(0x3f << clk->scale_shift);
		val |= clkctrl_frac;
		clk_set_parent(clk, &osc_24M);
		udelay(10);
		__raw_writel(val, clk->scale_reg);
		/* ungate */
		__raw_writel(1<<7, clk->scale_reg + 8);
		/* write clkctrl_cpu */
		clk->saved_div = clkctrl_cpu;

		reg_val = __raw_readl(REGS_CLKCTRL_BASE + HW_CLKCTRL_CPU);
		reg_val &= ~0x3F;
		reg_val |= clkctrl_cpu;
		__raw_writel(reg_val, REGS_CLKCTRL_BASE + HW_CLKCTRL_CPU);

		for (i = 10000; i; i--)
			if (!clk_is_busy(clk))
				break;
		if (!i) {
			printk(KERN_ERR "couldn't set up CPU divisor\n");
			return -ETIMEDOUT;
		}
		clk_set_parent(clk, &pll_clk);
		clk->saved_div = 0;
		udelay(10);
	}
	return 0;
}

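/*
 * CPU rate = PLL rate * 18 / fraction / CPU divider, rounded up to a
 * multiple of 10 (rates in this file are kept in kHz).
 */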
static long cpu_get_rate(struct clk *clk)
{
	long rate = clk->parent->rate * 18;

	rate /= (__raw_readl(clk->scale_reg) >> clk->scale_shift) & 0x3f;
	rate /= __raw_readl(REGS_CLKCTRL_BASE + HW_CLKCTRL_CPU) & 0x3f;
	rate = ((rate + 9) / 10) * 10;
	clk->rate = rate;

	return rate;
}

static long cpu_round_rate(struct clk *clk, u32 rate)
{
	unsigned long r = 0;

	if (rate <= 24000)
		r = 24000;
	else {
		u32 clkctrl_cpu = 1;
		u32 clkctrl_frac;
		do {
			clkctrl_frac =
				(pll_clk.rate*18 / clkctrl_cpu + rate/2) / rate;
			if (clkctrl_frac > 35)
				continue;
			if (pll_clk.rate*18 / clkctrl_frac / clkctrl_cpu/10 ==
			    rate / 10)
				break;
		} while (pll_clk.rate / 2 >= clkctrl_cpu++ * rate);
		if (pll_clk.rate / 2 < (clkctrl_cpu - 1) * rate)
			clkctrl_cpu--;
		pr_debug("%s: clkctrl_cpu %d, clkctrl_frac %d\n", __func__,
				clkctrl_cpu, clkctrl_frac);
		if (clkctrl_frac < 18)
			clkctrl_frac = 18;
		if (clkctrl_frac > 35)
			clkctrl_frac = 35;

		r = pll_clk.rate * 18;
		r /= clkctrl_frac;
		r /= clkctrl_cpu;
		r = 10 * ((r + 9) / 10);
	}
	return r;
}

static long emi_get_rate(struct clk *clk)
{
	long rate = clk->parent->rate * 18;

	rate /= (__raw_readl(clk->scale_reg) >> clk->scale_shift) & 0x3f;
	rate /= __raw_readl(REGS_CLKCTRL_BASE + HW_CLKCTRL_EMI) & 0x3f;
	clk->rate = rate;

	return rate;
}

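/*
 * Parent switching is done through the HW_CLKCTRL_CLKSEQ bypass bits.
 * The STMP3xxx control registers have set/clear aliases at offsets
 * +4/+8, so writing the bypass bit at bypass_reg + 4 selects the
 * 24 MHz crystal path and writing it at bypass_reg + 8 selects the
 * PLL-derived path. On STMP378x the HBUS/CPU dividers are re-adjusted
 * around the switch.
 */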
static int clkseq_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = -EINVAL;
	int shift = 8;

	/* bypass? */
	if (parent == &osc_24M)
		shift = 4;

	if (clk->bypass_reg) {
#ifdef CONFIG_ARCH_STMP378X
		u32 hbus_val, cpu_val;

		if (clk == &cpu_clk && shift == 4) {
			hbus_val = __raw_readl(REGS_CLKCTRL_BASE +
					HW_CLKCTRL_HBUS);
			cpu_val = __raw_readl(REGS_CLKCTRL_BASE +
					HW_CLKCTRL_CPU);

			hbus_val &= ~(BM_CLKCTRL_HBUS_DIV_FRAC_EN |
					BM_CLKCTRL_HBUS_DIV);
			clk->saved_div = cpu_val & BM_CLKCTRL_CPU_DIV_CPU;
			cpu_val &= ~BM_CLKCTRL_CPU_DIV_CPU;
			cpu_val |= 1;

			if (machine_is_stmp378x()) {
				__raw_writel(hbus_val,
					REGS_CLKCTRL_BASE + HW_CLKCTRL_HBUS);
				__raw_writel(cpu_val,
					REGS_CLKCTRL_BASE + HW_CLKCTRL_CPU);
				hclk.rate = 0;
			}
		} else if (clk == &cpu_clk && shift == 8) {
			hbus_val = __raw_readl(REGS_CLKCTRL_BASE +
					HW_CLKCTRL_HBUS);
			cpu_val = __raw_readl(REGS_CLKCTRL_BASE +
					HW_CLKCTRL_CPU);
			hbus_val &= ~(BM_CLKCTRL_HBUS_DIV_FRAC_EN |
					BM_CLKCTRL_HBUS_DIV);
			hbus_val |= 2;
			cpu_val &= ~BM_CLKCTRL_CPU_DIV_CPU;
			if (clk->saved_div)
				cpu_val |= clk->saved_div;
			else
				cpu_val |= 2;

			if (machine_is_stmp378x()) {
				__raw_writel(hbus_val,
					REGS_CLKCTRL_BASE + HW_CLKCTRL_HBUS);
				__raw_writel(cpu_val,
					REGS_CLKCTRL_BASE + HW_CLKCTRL_CPU);
				hclk.rate = 0;
			}
		}
#endif
		__raw_writel(1 << clk->bypass_shift, clk->bypass_reg + shift);

		ret = 0;
	}

	return ret;
}

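/*
 * HCLK is divided down from the CPU clock. The 5-bit field in
 * HW_CLKCTRL_HBUS is either an integer divisor or, with the
 * DIV_FRAC_EN bit (bit 5) set, a multiplier n giving HCLK = CPU * n / 32.
 * The CPU is parked on the 24 MHz crystal while the register is
 * rewritten, then switched back to the PLL path.
 */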
static int hbus_set_rate(struct clk *clk, u32 rate)
{
	u8 div = 0;
	int is_frac = 0;
	u32 clkctrl_hbus;
	struct clk *parent = clk->parent;

	pr_debug("%s: rate %d, parent rate %d\n", __func__, rate,
			parent->rate);

	if (rate > parent->rate)
		return -EINVAL;

	if (((parent->rate + rate/2) / rate) * rate != parent->rate &&
			parent->rate / rate < 32) {
		pr_debug("%s: switching to fractional mode\n", __func__);
		is_frac = 1;
	}

	if (is_frac)
		div = (32 * rate + parent->rate / 2) / parent->rate;
	else
		div = (parent->rate + rate - 1) / rate;
	pr_debug("%s: div calculated is %d\n", __func__, div);
	if (!div || div > 0x1f)
		return -EINVAL;

	clk_set_parent(&cpu_clk, &osc_24M);
	udelay(10);
	clkctrl_hbus = __raw_readl(clk->scale_reg);
	clkctrl_hbus &= ~0x3f;
	clkctrl_hbus |= div;
	clkctrl_hbus |= (is_frac << 5);

	__raw_writel(clkctrl_hbus, clk->scale_reg);
	if (clk->busy_reg) {
		int i;
		for (i = 10000; i; i--)
			if (!clk_is_busy(clk))
				break;
		if (!i) {
			printk(KERN_ERR "couldn't set up HBUS divisor\n");
			return -ETIMEDOUT;
		}
	}
	clk_set_parent(&cpu_clk, &pll_clk);
	__raw_writel(clkctrl_hbus, clk->scale_reg);
	udelay(10);
	return 0;
}

static long hbus_get_rate(struct clk *clk)
{
	long rate = clk->parent->rate;

	if (__raw_readl(clk->scale_reg) & 0x20) {
		rate *= __raw_readl(clk->scale_reg) & 0x1f;
		rate /= 32;
	} else
		rate /= __raw_readl(clk->scale_reg) & 0x1f;
	clk->rate = rate;

	return rate;
}

static int xbus_set_rate(struct clk *clk, u32 rate)
{
	u16 div = 0;
	u32 clkctrl_xbus;

	pr_debug("%s: rate %d, parent rate %d\n", __func__, rate,
			clk->parent->rate);

	div = (clk->parent->rate + rate - 1) / rate;
	pr_debug("%s: div calculated is %d\n", __func__, div);
	if (!div || div > 0x3ff)
		return -EINVAL;

	clkctrl_xbus = __raw_readl(clk->scale_reg);
	clkctrl_xbus &= ~0x3ff;
	clkctrl_xbus |= div;
	__raw_writel(clkctrl_xbus, clk->scale_reg);
	if (clk->busy_reg) {
		int i;
		for (i = 10000; i; i--)
			if (!clk_is_busy(clk))
				break;
		if (!i) {
			printk(KERN_ERR "couldn't set up xbus divisor\n");
			return -ETIMEDOUT;
		}
	}
	return 0;
}

static long xbus_get_rate(struct clk *clk)
{
	long rate = clk->parent->rate;

	rate /= __raw_readl(clk->scale_reg) & 0x3ff;
	clk->rate = rate;

	return rate;
}

/* Clock ops */

static struct clk_ops std_ops = {
	.enable = std_clk_enable,
	.disable = std_clk_disable,
	.get_rate = per_get_rate,
	.set_rate = per_set_rate,
	.set_parent = clkseq_set_parent,
};

static struct clk_ops min_ops = {
	.enable = std_clk_enable,
	.disable = std_clk_disable,
};

static struct clk_ops cpu_ops = {
	.enable = std_clk_enable,
	.disable = std_clk_disable,
	.get_rate = cpu_get_rate,
	.set_rate = cpu_set_rate,
	.round_rate = cpu_round_rate,
	.set_parent = clkseq_set_parent,
};

static struct clk_ops io_ops = {
	.enable = std_clk_enable,
	.disable = std_clk_disable,
	.get_rate = io_get_rate,
	.set_rate = io_set_rate,
};

static struct clk_ops hbus_ops = {
	.get_rate = hbus_get_rate,
	.set_rate = hbus_set_rate,
};

static struct clk_ops xbus_ops = {
	.get_rate = xbus_get_rate,
	.set_rate = xbus_set_rate,
};

static struct clk_ops lcdif_ops = {
	.enable = std_clk_enable,
	.disable = std_clk_disable,
	.get_rate = lcdif_get_rate,
	.set_rate = lcdif_set_rate,
	.set_parent = clkseq_set_parent,
};

static struct clk_ops emi_ops = {
	.get_rate = emi_get_rate,
};

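/*
 * Static clock descriptors. For each clock: enable_reg/enable_shift is
 * the gate bit (inverted when enable_negate is set), scale_reg/
 * scale_shift locates the divider field, busy_reg/busy_bit is polled
 * after divider updates, and bypass_reg/bypass_shift is the
 * HW_CLKCTRL_CLKSEQ bit that selects between the 24 MHz crystal and
 * the PLL-derived source. Rates are in kHz.
 */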
/* List of on-chip clocks */

static struct clk osc_24M = {
	.flags = FIXED_RATE | ENABLED,
	.rate = 24000,
};

static struct clk pll_clk = {
	.parent = &osc_24M,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_PLLCTRL0,
	.enable_shift = 16,
	.enable_wait = 10,
	.flags = FIXED_RATE | ENABLED,
	.rate = 480000,
	.ops = &min_ops,
};

static struct clk cpu_clk = {
	.parent = &pll_clk,
	.scale_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC,
	.scale_shift = 0,
	.bypass_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
	.bypass_shift = 7,
	.busy_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_CPU,
	.busy_bit = 28,
	.flags = RATE_PROPAGATES | ENABLED,
	.ops = &cpu_ops,
};

static struct clk io_clk = {
	.parent = &pll_clk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC,
	.enable_shift = 31,
	.enable_negate = 1,
	.scale_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC,
	.scale_shift = 24,
	.flags = RATE_PROPAGATES | ENABLED,
	.ops = &io_ops,
};

static struct clk hclk = {
	.parent = &cpu_clk,
	.scale_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_HBUS,
	.bypass_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
	.bypass_shift = 7,
	.busy_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_HBUS,
	.busy_bit = 29,
	.flags = RATE_PROPAGATES | ENABLED,
	.ops = &hbus_ops,
};

static struct clk xclk = {
	.parent = &osc_24M,
	.scale_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_XBUS,
	.busy_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_XBUS,
	.busy_bit = 31,
	.flags = RATE_PROPAGATES | ENABLED,
	.ops = &xbus_ops,
};

static struct clk uart_clk = {
	.parent = &xclk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_XTAL,
	.enable_shift = 31,
	.enable_negate = 1,
	.flags = ENABLED,
	.ops = &min_ops,
};

static struct clk audio_clk = {
	.parent = &xclk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_XTAL,
	.enable_shift = 30,
	.enable_negate = 1,
	.ops = &min_ops,
};

static struct clk pwm_clk = {
	.parent = &xclk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_XTAL,
	.enable_shift = 29,
	.enable_negate = 1,
	.ops = &min_ops,
};

static struct clk dri_clk = {
	.parent = &xclk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_XTAL,
	.enable_shift = 28,
	.enable_negate = 1,
	.ops = &min_ops,
};

static struct clk digctl_clk = {
	.parent = &xclk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_XTAL,
	.enable_shift = 27,
	.enable_negate = 1,
	.ops = &min_ops,
};

static struct clk timer_clk = {
	.parent = &xclk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_XTAL,
	.enable_shift = 26,
	.enable_negate = 1,
	.flags = ENABLED,
	.ops = &min_ops,
};

static struct clk lcdif_clk = {
	.parent = &pll_clk,
	.scale_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_PIX,
	.busy_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_PIX,
	.busy_bit = 29,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_PIX,
	.enable_shift = 31,
	.enable_negate = 1,
	.bypass_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
	.bypass_shift = 1,
	.flags = NEEDS_SET_PARENT,
	.ops = &lcdif_ops,
};

static struct clk ssp_clk = {
	.parent = &io_clk,
	.scale_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_SSP,
	.busy_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_SSP,
	.busy_bit = 29,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_SSP,
	.enable_shift = 31,
	.bypass_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
	.bypass_shift = 5,
	.enable_negate = 1,
	.flags = NEEDS_SET_PARENT,
	.ops = &std_ops,
};

static struct clk gpmi_clk = {
	.parent = &io_clk,
	.scale_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_GPMI,
	.busy_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_GPMI,
	.busy_bit = 29,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_GPMI,
	.enable_shift = 31,
	.enable_negate = 1,
	.bypass_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
	.bypass_shift = 4,
	.flags = NEEDS_SET_PARENT,
	.ops = &std_ops,
};

static struct clk spdif_clk = {
	.parent = &pll_clk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_SPDIF,
	.enable_shift = 31,
	.enable_negate = 1,
	.ops = &min_ops,
};

static struct clk emi_clk = {
	.parent = &pll_clk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_EMI,
	.enable_shift = 31,
	.enable_negate = 1,
	.scale_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC,
	.scale_shift = 8,
	.busy_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_EMI,
	.busy_bit = 28,
	.bypass_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
	.bypass_shift = 6,
	.flags = ENABLED,
	.ops = &emi_ops,
};

static struct clk ir_clk = {
	.parent = &io_clk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_IR,
	.enable_shift = 31,
	.enable_negate = 1,
	.bypass_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
	.bypass_shift = 3,
	.ops = &min_ops,
};

static struct clk saif_clk = {
	.parent = &pll_clk,
	.scale_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_SAIF,
	.busy_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_SAIF,
	.busy_bit = 29,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_SAIF,
	.enable_shift = 31,
	.enable_negate = 1,
	.bypass_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
	.bypass_shift = 0,
	.ops = &std_ops,
};

static struct clk usb_clk = {
	.parent = &pll_clk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_PLLCTRL0,
	.enable_shift = 18,
	.enable_negate = 1,
	.ops = &min_ops,
};

/* list of all the clocks */
static struct clk_lookup onchip_clks[] = {
	{
		.con_id = "osc_24M",
		.clk = &osc_24M,
	}, {
		.con_id = "pll",
		.clk = &pll_clk,
	}, {
		.con_id = "cpu",
		.clk = &cpu_clk,
	}, {
		.con_id = "hclk",
		.clk = &hclk,
	}, {
		.con_id = "xclk",
		.clk = &xclk,
	}, {
		.con_id = "io",
		.clk = &io_clk,
	}, {
		.con_id = "uart",
		.clk = &uart_clk,
	}, {
		.con_id = "audio",
		.clk = &audio_clk,
	}, {
		.con_id = "pwm",
		.clk = &pwm_clk,
	}, {
		.con_id = "dri",
		.clk = &dri_clk,
	}, {
		.con_id = "digctl",
		.clk = &digctl_clk,
	}, {
		.con_id = "timer",
		.clk = &timer_clk,
	}, {
		.con_id = "lcdif",
		.clk = &lcdif_clk,
	}, {
		.con_id = "ssp",
		.clk = &ssp_clk,
	}, {
		.con_id = "gpmi",
		.clk = &gpmi_clk,
	}, {
		.con_id = "spdif",
		.clk = &spdif_clk,
	}, {
		.con_id = "emi",
		.clk = &emi_clk,
	}, {
		.con_id = "ir",
		.clk = &ir_clk,
	}, {
		.con_id = "saif",
		.clk = &saif_clk,
	}, {
		.con_id = "usb",
		.clk = &usb_clk,
	},
};

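/*
 * Re-read the rate of every clock parented to 'clk', recursing into
 * children that are themselves flagged RATE_PROPAGATES.
 */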
static int __init propagate_rate(struct clk *clk)
{
	struct clk_lookup *cl;

	for (cl = onchip_clks; cl < onchip_clks + ARRAY_SIZE(onchip_clks);
	     cl++) {
		if (unlikely(!clk_good(cl->clk)))
			continue;
		if (cl->clk->parent == clk && cl->clk->ops->get_rate) {
			cl->clk->ops->get_rate(cl->clk);
			if (cl->clk->flags & RATE_PROPAGATES)
				propagate_rate(cl->clk);
		}
	}

	return 0;
}

/* Exported API */
unsigned long clk_get_rate(struct clk *clk)
{
	if (unlikely(!clk_good(clk)))
		return 0;

	if (clk->rate != 0)
		return clk->rate;

	if (clk->ops->get_rate != NULL)
		return clk->ops->get_rate(clk);

	return clk_get_rate(clk->parent);
}
EXPORT_SYMBOL(clk_get_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (unlikely(!clk_good(clk)))
		return 0;

	if (clk->ops->round_rate)
		return clk->ops->round_rate(clk, rate);

	return 0;
}
EXPORT_SYMBOL(clk_round_rate);

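/* Two rates are "close enough" if they differ by less than 0.1%. */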
static inline int close_enough(long rate1, long rate2)
{
	return rate1 && !((rate2 - rate1) * 1000 / rate1);
}

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EINVAL;

	if (unlikely(!clk_good(clk)))
		goto out;

	if (clk->flags & FIXED_RATE || !clk->ops->set_rate)
		goto out;

	else if (!close_enough(clk->rate, rate)) {
		ret = clk->ops->set_rate(clk, rate);
		if (ret < 0)
			goto out;
		clk->rate = rate;
		if (clk->flags & RATE_PROPAGATES)
			propagate_rate(clk);
	} else
		ret = 0;

out:
	return ret;
}
EXPORT_SYMBOL(clk_set_rate);

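/*
 * Enabling a clock first enables its parent chain, then bumps the
 * usage count and ungates the clock under clocks_lock.
 */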
int clk_enable(struct clk *clk)
{
	unsigned long clocks_flags;

	if (unlikely(!clk_good(clk)))
		return -EINVAL;

	if (clk->parent)
		clk_enable(clk->parent);

	spin_lock_irqsave(&clocks_lock, clocks_flags);

	clk->usage++;
	if (clk->ops && clk->ops->enable)
		clk->ops->enable(clk);

	spin_unlock_irqrestore(&clocks_lock, clocks_flags);
	return 0;
}
EXPORT_SYMBOL(clk_enable);

static void local_clk_disable(struct clk *clk)
{
	if (unlikely(!clk_good(clk)))
		return;

	if (clk->usage == 0 && clk->ops->disable)
		clk->ops->disable(clk);

	if (clk->parent)
		local_clk_disable(clk->parent);
}

void clk_disable(struct clk *clk)
{
	unsigned long clocks_flags;

	if (unlikely(!clk_good(clk)))
		return;

	spin_lock_irqsave(&clocks_lock, clocks_flags);

	if ((--clk->usage) == 0 && clk->ops->disable)
		clk->ops->disable(clk);

	spin_unlock_irqrestore(&clocks_lock, clocks_flags);
	if (clk->parent)
		clk_disable(clk->parent);
}
EXPORT_SYMBOL(clk_disable);

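/*
 * Switch 'clk' to a new parent and move its usage count over from the
 * old parent; unused parents are gated via local_clk_disable().
 */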
/* Some additional API */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = -ENODEV;
	unsigned long clocks_flags;

	if (unlikely(!clk_good(clk)))
		goto out;

	if (!clk->ops->set_parent)
		goto out;

	spin_lock_irqsave(&clocks_lock, clocks_flags);

	ret = clk->ops->set_parent(clk, parent);
	if (!ret) {
		/* disable if usage count is 0 */
		local_clk_disable(parent);

		parent->usage += clk->usage;
		clk->parent->usage -= clk->usage;

		/* disable if new usage count is 0 */
		local_clk_disable(clk->parent);

		clk->parent = parent;
	}
	spin_unlock_irqrestore(&clocks_lock, clocks_flags);

out:
	return ret;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	if (unlikely(!clk_good(clk)))
		return NULL;
	return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);

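/*
 * Boot-time setup: enable the clocks flagged ENABLED, gate the rest,
 * program initial rates/parents where requested, read back the
 * remaining rates and register every clock with clkdev.
 */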
static int __init clk_init(void)
{
	struct clk_lookup *cl;
	struct clk_ops *ops;

	spin_lock_init(&clocks_lock);

	for (cl = onchip_clks; cl < onchip_clks + ARRAY_SIZE(onchip_clks);
	     cl++) {
		if (cl->clk->flags & ENABLED)
			clk_enable(cl->clk);
		else
			local_clk_disable(cl->clk);

		ops = cl->clk->ops;

		if ((cl->clk->flags & NEEDS_INITIALIZATION) &&
				ops && ops->set_rate)
			ops->set_rate(cl->clk, cl->clk->rate);

		if (cl->clk->flags & FIXED_RATE) {
			if (cl->clk->flags & RATE_PROPAGATES)
				propagate_rate(cl->clk);
		} else {
			if (ops && ops->get_rate)
				ops->get_rate(cl->clk);
		}

		if (cl->clk->flags & NEEDS_SET_PARENT) {
			if (ops && ops->set_parent)
				ops->set_parent(cl->clk, cl->clk->parent);
		}

		clkdev_add(cl);
	}
	return 0;
}

arch_initcall(clk_init);