blob: bf6218ee94e193a69b9de7ef1da240f2d320e16b [file] [log] [blame]
/*
 * Clock and PLL control for DaVinci devices
 *
 * Copyright (C) 2006-2007 Texas Instruments.
 * Copyright (C) 2008-2009 Deep Root Systems, LLC
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
12
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <mach/hardware.h>

#include <mach/psc.h>
#include <mach/cputype.h>
#include "clock.h"
28
/* All registered clocks; walks are serialized by clocks_mutex. */
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
/* Protects usecounts, cached rates, and PSC/PLL register sequences. */
static DEFINE_SPINLOCK(clockfw_lock);
32
Kevin Hilmanc5b736d2009-03-20 17:29:01 -070033static unsigned psc_domain(struct clk *clk)
Vladimir Barinov3e062b02007-06-05 16:36:55 +010034{
Kevin Hilmanc5b736d2009-03-20 17:29:01 -070035 return (clk->flags & PSC_DSP)
36 ? DAVINCI_GPSC_DSPDOMAIN
37 : DAVINCI_GPSC_ARMDOMAIN;
Vladimir Barinov3e062b02007-06-05 16:36:55 +010038}
Vladimir Barinov3e062b02007-06-05 16:36:55 +010039
Kevin Hilmanc5b736d2009-03-20 17:29:01 -070040static void __clk_enable(struct clk *clk)
Vladimir Barinov3e062b02007-06-05 16:36:55 +010041{
Kevin Hilmanc5b736d2009-03-20 17:29:01 -070042 if (clk->parent)
43 __clk_enable(clk->parent);
44 if (clk->usecount++ == 0 && (clk->flags & CLK_PSC))
Sergei Shtylyov789a7852009-09-30 19:48:03 +040045 davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc, 1);
Vladimir Barinov3e062b02007-06-05 16:36:55 +010046}
47
48static void __clk_disable(struct clk *clk)
49{
Kevin Hilmanc5b736d2009-03-20 17:29:01 -070050 if (WARN_ON(clk->usecount == 0))
Vladimir Barinov3e062b02007-06-05 16:36:55 +010051 return;
Chaithrika U S679f9212009-12-15 18:02:58 +053052 if (--clk->usecount == 0 && !(clk->flags & CLK_PLL) &&
53 (clk->flags & CLK_PSC))
Sergei Shtylyov789a7852009-09-30 19:48:03 +040054 davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc, 0);
Kevin Hilmanc5b736d2009-03-20 17:29:01 -070055 if (clk->parent)
56 __clk_disable(clk->parent);
Vladimir Barinov3e062b02007-06-05 16:36:55 +010057}
58
59int clk_enable(struct clk *clk)
60{
61 unsigned long flags;
Vladimir Barinov3e062b02007-06-05 16:36:55 +010062
63 if (clk == NULL || IS_ERR(clk))
64 return -EINVAL;
65
Kevin Hilmanc5b736d2009-03-20 17:29:01 -070066 spin_lock_irqsave(&clockfw_lock, flags);
67 __clk_enable(clk);
68 spin_unlock_irqrestore(&clockfw_lock, flags);
Vladimir Barinov3e062b02007-06-05 16:36:55 +010069
Kevin Hilmanc5b736d2009-03-20 17:29:01 -070070 return 0;
Vladimir Barinov3e062b02007-06-05 16:36:55 +010071}
72EXPORT_SYMBOL(clk_enable);
73
74void clk_disable(struct clk *clk)
75{
76 unsigned long flags;
77
78 if (clk == NULL || IS_ERR(clk))
79 return;
80
Kevin Hilmanc5b736d2009-03-20 17:29:01 -070081 spin_lock_irqsave(&clockfw_lock, flags);
82 __clk_disable(clk);
83 spin_unlock_irqrestore(&clockfw_lock, flags);
Vladimir Barinov3e062b02007-06-05 16:36:55 +010084}
85EXPORT_SYMBOL(clk_disable);
86
87unsigned long clk_get_rate(struct clk *clk)
88{
89 if (clk == NULL || IS_ERR(clk))
90 return -EINVAL;
91
Kevin Hilmanc5b736d2009-03-20 17:29:01 -070092 return clk->rate;
Vladimir Barinov3e062b02007-06-05 16:36:55 +010093}
94EXPORT_SYMBOL(clk_get_rate);
95
96long clk_round_rate(struct clk *clk, unsigned long rate)
97{
98 if (clk == NULL || IS_ERR(clk))
99 return -EINVAL;
100
Sekhar Norid6a61562009-08-31 15:48:03 +0530101 if (clk->round_rate)
102 return clk->round_rate(clk, rate);
103
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700104 return clk->rate;
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100105}
106EXPORT_SYMBOL(clk_round_rate);
107
Sekhar Norid6a61562009-08-31 15:48:03 +0530108/* Propagate rate to children */
109static void propagate_rate(struct clk *root)
110{
111 struct clk *clk;
112
113 list_for_each_entry(clk, &root->children, childnode) {
114 if (clk->recalc)
115 clk->rate = clk->recalc(clk);
116 propagate_rate(clk);
117 }
118}
119
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100120int clk_set_rate(struct clk *clk, unsigned long rate)
121{
Sekhar Norid6a61562009-08-31 15:48:03 +0530122 unsigned long flags;
123 int ret = -EINVAL;
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100124
Sekhar Norid6a61562009-08-31 15:48:03 +0530125 if (clk == NULL || IS_ERR(clk))
126 return ret;
127
Sekhar Norid6a61562009-08-31 15:48:03 +0530128 if (clk->set_rate)
129 ret = clk->set_rate(clk, rate);
Sekhar Nori3b43cd62010-01-12 18:55:35 +0530130
131 spin_lock_irqsave(&clockfw_lock, flags);
Sekhar Norid6a61562009-08-31 15:48:03 +0530132 if (ret == 0) {
133 if (clk->recalc)
134 clk->rate = clk->recalc(clk);
135 propagate_rate(clk);
136 }
137 spin_unlock_irqrestore(&clockfw_lock, flags);
138
139 return ret;
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100140}
141EXPORT_SYMBOL(clk_set_rate);
142
Sekhar Norib82a51e2009-08-31 15:48:04 +0530143int clk_set_parent(struct clk *clk, struct clk *parent)
144{
145 unsigned long flags;
146
147 if (clk == NULL || IS_ERR(clk))
148 return -EINVAL;
149
150 /* Cannot change parent on enabled clock */
151 if (WARN_ON(clk->usecount))
152 return -EINVAL;
153
154 mutex_lock(&clocks_mutex);
155 clk->parent = parent;
156 list_del_init(&clk->childnode);
157 list_add(&clk->childnode, &clk->parent->children);
158 mutex_unlock(&clocks_mutex);
159
160 spin_lock_irqsave(&clockfw_lock, flags);
161 if (clk->recalc)
162 clk->rate = clk->recalc(clk);
163 propagate_rate(clk);
164 spin_unlock_irqrestore(&clockfw_lock, flags);
165
166 return 0;
167}
168EXPORT_SYMBOL(clk_set_parent);
169
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100170int clk_register(struct clk *clk)
171{
172 if (clk == NULL || IS_ERR(clk))
173 return -EINVAL;
174
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700175 if (WARN(clk->parent && !clk->parent->rate,
176 "CLK: %s parent %s has no rate!\n",
177 clk->name, clk->parent->name))
178 return -EINVAL;
179
Sekhar Norif02bf3b2009-08-31 15:48:01 +0530180 INIT_LIST_HEAD(&clk->children);
181
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100182 mutex_lock(&clocks_mutex);
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700183 list_add_tail(&clk->node, &clocks);
Sekhar Norif02bf3b2009-08-31 15:48:01 +0530184 if (clk->parent)
185 list_add_tail(&clk->childnode, &clk->parent->children);
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100186 mutex_unlock(&clocks_mutex);
187
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700188 /* If rate is already set, use it */
189 if (clk->rate)
190 return 0;
191
Sekhar Noride381a92009-08-31 15:48:02 +0530192 /* Else, see if there is a way to calculate it */
193 if (clk->recalc)
194 clk->rate = clk->recalc(clk);
195
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700196 /* Otherwise, default to parent rate */
Sekhar Noride381a92009-08-31 15:48:02 +0530197 else if (clk->parent)
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700198 clk->rate = clk->parent->rate;
199
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100200 return 0;
201}
202EXPORT_SYMBOL(clk_register);
203
204void clk_unregister(struct clk *clk)
205{
206 if (clk == NULL || IS_ERR(clk))
207 return;
208
209 mutex_lock(&clocks_mutex);
210 list_del(&clk->node);
Sekhar Norif02bf3b2009-08-31 15:48:01 +0530211 list_del(&clk->childnode);
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100212 mutex_unlock(&clocks_mutex);
213}
214EXPORT_SYMBOL(clk_unregister);
215
#ifdef CONFIG_DAVINCI_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
	struct clk *ck;

	spin_lock_irq(&clockfw_lock);
	list_for_each_entry(ck, &clocks, node) {
		/* Skip clocks in use or not managed through a PSC */
		if (ck->usecount > 0)
			continue;
		if (!(ck->flags & CLK_PSC))
			continue;

		/* ignore if in Disabled or SwRstDisable states */
		if (!davinci_psc_is_clk_active(ck->gpsc, ck->lpsc))
			continue;

		pr_info("Clocks: disable unused %s\n", ck->name);
		davinci_psc_config(psc_domain(ck), ck->gpsc, ck->lpsc, 0);
	}
	spin_unlock_irq(&clockfw_lock);

	return 0;
}
late_initcall(clk_disable_unused);
#endif
244
Sekhar Noride381a92009-08-31 15:48:02 +0530245static unsigned long clk_sysclk_recalc(struct clk *clk)
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700246{
247 u32 v, plldiv;
248 struct pll_data *pll;
Sekhar Noride381a92009-08-31 15:48:02 +0530249 unsigned long rate = clk->rate;
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700250
251 /* If this is the PLL base clock, no more calculations needed */
252 if (clk->pll_data)
Sekhar Noride381a92009-08-31 15:48:02 +0530253 return rate;
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700254
255 if (WARN_ON(!clk->parent))
Sekhar Noride381a92009-08-31 15:48:02 +0530256 return rate;
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700257
Sekhar Noride381a92009-08-31 15:48:02 +0530258 rate = clk->parent->rate;
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700259
260 /* Otherwise, the parent must be a PLL */
261 if (WARN_ON(!clk->parent->pll_data))
Sekhar Noride381a92009-08-31 15:48:02 +0530262 return rate;
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700263
264 pll = clk->parent->pll_data;
265
266 /* If pre-PLL, source clock is before the multiplier and divider(s) */
267 if (clk->flags & PRE_PLL)
Sekhar Noride381a92009-08-31 15:48:02 +0530268 rate = pll->input_rate;
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700269
270 if (!clk->div_reg)
Sekhar Noride381a92009-08-31 15:48:02 +0530271 return rate;
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700272
273 v = __raw_readl(pll->base + clk->div_reg);
274 if (v & PLLDIV_EN) {
275 plldiv = (v & PLLDIV_RATIO_MASK) + 1;
276 if (plldiv)
Sekhar Noride381a92009-08-31 15:48:02 +0530277 rate /= plldiv;
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700278 }
Sekhar Noride381a92009-08-31 15:48:02 +0530279
280 return rate;
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700281}
282
Sekhar Noride381a92009-08-31 15:48:02 +0530283static unsigned long clk_leafclk_recalc(struct clk *clk)
284{
285 if (WARN_ON(!clk->parent))
286 return clk->rate;
287
288 return clk->parent->rate;
289}
290
291static unsigned long clk_pllclk_recalc(struct clk *clk)
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700292{
293 u32 ctrl, mult = 1, prediv = 1, postdiv = 1;
294 u8 bypass;
295 struct pll_data *pll = clk->pll_data;
Sekhar Noride381a92009-08-31 15:48:02 +0530296 unsigned long rate = clk->rate;
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700297
298 pll->base = IO_ADDRESS(pll->phys_base);
299 ctrl = __raw_readl(pll->base + PLLCTL);
Sekhar Noride381a92009-08-31 15:48:02 +0530300 rate = pll->input_rate = clk->parent->rate;
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700301
302 if (ctrl & PLLCTL_PLLEN) {
303 bypass = 0;
304 mult = __raw_readl(pll->base + PLLM);
Sandeep Paulrajfb8fcb82009-06-11 09:41:05 -0400305 if (cpu_is_davinci_dm365())
306 mult = 2 * (mult & PLLM_PLLM_MASK);
307 else
308 mult = (mult & PLLM_PLLM_MASK) + 1;
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700309 } else
310 bypass = 1;
311
312 if (pll->flags & PLL_HAS_PREDIV) {
313 prediv = __raw_readl(pll->base + PREDIV);
314 if (prediv & PLLDIV_EN)
315 prediv = (prediv & PLLDIV_RATIO_MASK) + 1;
316 else
317 prediv = 1;
318 }
319
320 /* pre-divider is fixed, but (some?) chips won't report that */
321 if (cpu_is_davinci_dm355() && pll->num == 1)
322 prediv = 8;
323
324 if (pll->flags & PLL_HAS_POSTDIV) {
325 postdiv = __raw_readl(pll->base + POSTDIV);
326 if (postdiv & PLLDIV_EN)
327 postdiv = (postdiv & PLLDIV_RATIO_MASK) + 1;
328 else
329 postdiv = 1;
330 }
331
332 if (!bypass) {
Sekhar Noride381a92009-08-31 15:48:02 +0530333 rate /= prediv;
334 rate *= mult;
335 rate /= postdiv;
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700336 }
337
338 pr_debug("PLL%d: input = %lu MHz [ ",
339 pll->num, clk->parent->rate / 1000000);
340 if (bypass)
341 pr_debug("bypass ");
342 if (prediv > 1)
343 pr_debug("/ %d ", prediv);
344 if (mult > 1)
345 pr_debug("* %d ", mult);
346 if (postdiv > 1)
347 pr_debug("/ %d ", postdiv);
Sekhar Noride381a92009-08-31 15:48:02 +0530348 pr_debug("] --> %lu MHz output.\n", rate / 1000000);
349
350 return rate;
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700351}
352
Sekhar Norid6a61562009-08-31 15:48:03 +0530353/**
354 * davinci_set_pllrate - set the output rate of a given PLL.
355 *
356 * Note: Currently tested to work with OMAP-L138 only.
357 *
358 * @pll: pll whose rate needs to be changed.
359 * @prediv: The pre divider value. Passing 0 disables the pre-divider.
360 * @pllm: The multiplier value. Passing 0 leads to multiply-by-one.
361 * @postdiv: The post divider value. Passing 0 disables the post-divider.
362 */
363int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
364 unsigned int mult, unsigned int postdiv)
365{
366 u32 ctrl;
367 unsigned int locktime;
Sekhar Nori3b43cd62010-01-12 18:55:35 +0530368 unsigned long flags;
Sekhar Norid6a61562009-08-31 15:48:03 +0530369
370 if (pll->base == NULL)
371 return -EINVAL;
372
373 /*
374 * PLL lock time required per OMAP-L138 datasheet is
375 * (2000 * prediv)/sqrt(pllm) OSCIN cycles. We approximate sqrt(pllm)
376 * as 4 and OSCIN cycle as 25 MHz.
377 */
378 if (prediv) {
379 locktime = ((2000 * prediv) / 100);
380 prediv = (prediv - 1) | PLLDIV_EN;
381 } else {
Sekhar Nori9a219a92009-11-16 17:21:33 +0530382 locktime = PLL_LOCK_TIME;
Sekhar Norid6a61562009-08-31 15:48:03 +0530383 }
384 if (postdiv)
385 postdiv = (postdiv - 1) | PLLDIV_EN;
386 if (mult)
387 mult = mult - 1;
388
Sekhar Nori3b43cd62010-01-12 18:55:35 +0530389 /* Protect against simultaneous calls to PLL setting seqeunce */
390 spin_lock_irqsave(&clockfw_lock, flags);
391
Sekhar Norid6a61562009-08-31 15:48:03 +0530392 ctrl = __raw_readl(pll->base + PLLCTL);
393
394 /* Switch the PLL to bypass mode */
395 ctrl &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
396 __raw_writel(ctrl, pll->base + PLLCTL);
397
Sekhar Nori9a219a92009-11-16 17:21:33 +0530398 udelay(PLL_BYPASS_TIME);
Sekhar Norid6a61562009-08-31 15:48:03 +0530399
400 /* Reset and enable PLL */
401 ctrl &= ~(PLLCTL_PLLRST | PLLCTL_PLLDIS);
402 __raw_writel(ctrl, pll->base + PLLCTL);
403
404 if (pll->flags & PLL_HAS_PREDIV)
405 __raw_writel(prediv, pll->base + PREDIV);
406
407 __raw_writel(mult, pll->base + PLLM);
408
409 if (pll->flags & PLL_HAS_POSTDIV)
410 __raw_writel(postdiv, pll->base + POSTDIV);
411
Sekhar Nori9a219a92009-11-16 17:21:33 +0530412 udelay(PLL_RESET_TIME);
Sekhar Norid6a61562009-08-31 15:48:03 +0530413
414 /* Bring PLL out of reset */
415 ctrl |= PLLCTL_PLLRST;
416 __raw_writel(ctrl, pll->base + PLLCTL);
417
418 udelay(locktime);
419
420 /* Remove PLL from bypass mode */
421 ctrl |= PLLCTL_PLLEN;
422 __raw_writel(ctrl, pll->base + PLLCTL);
423
Sekhar Nori3b43cd62010-01-12 18:55:35 +0530424 spin_unlock_irqrestore(&clockfw_lock, flags);
425
Sekhar Norid6a61562009-08-31 15:48:03 +0530426 return 0;
427}
428EXPORT_SYMBOL(davinci_set_pllrate);
429
Kevin Hilman08aca082010-01-11 08:22:23 -0800430int __init davinci_clk_init(struct clk_lookup *clocks)
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700431 {
Kevin Hilman08aca082010-01-11 08:22:23 -0800432 struct clk_lookup *c;
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700433 struct clk *clk;
Kevin Hilman08aca082010-01-11 08:22:23 -0800434 size_t num_clocks = 0;
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700435
Kevin Hilman08aca082010-01-11 08:22:23 -0800436 for (c = clocks; c->clk; c++) {
437 clk = c->clk;
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700438
Sekhar Noride381a92009-08-31 15:48:02 +0530439 if (!clk->recalc) {
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700440
Sekhar Noride381a92009-08-31 15:48:02 +0530441 /* Check if clock is a PLL */
442 if (clk->pll_data)
443 clk->recalc = clk_pllclk_recalc;
444
445 /* Else, if it is a PLL-derived clock */
446 else if (clk->flags & CLK_PLL)
447 clk->recalc = clk_sysclk_recalc;
448
449 /* Otherwise, it is a leaf clock (PSC clock) */
450 else if (clk->parent)
451 clk->recalc = clk_leafclk_recalc;
452 }
453
454 if (clk->recalc)
455 clk->rate = clk->recalc(clk);
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700456
457 if (clk->lpsc)
458 clk->flags |= CLK_PSC;
459
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700460 clk_register(clk);
Kevin Hilman08aca082010-01-11 08:22:23 -0800461 num_clocks++;
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700462
463 /* Turn on clocks that Linux doesn't otherwise manage */
464 if (clk->flags & ALWAYS_ENABLED)
465 clk_enable(clk);
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100466 }
467
Kevin Hilman08aca082010-01-11 08:22:23 -0800468 clkdev_add_table(clocks, num_clocks);
469
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100470 return 0;
471}
472
Sekhar Nori2f72e8d2009-12-03 15:36:52 +0530473#ifdef CONFIG_DEBUG_FS
474
475#include <linux/debugfs.h>
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100476#include <linux/seq_file.h>
477
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700478#define CLKNAME_MAX 10 /* longest clock name */
479#define NEST_DELTA 2
480#define NEST_MAX 4
481
482static void
483dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
484{
485 char *state;
486 char buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX];
487 struct clk *clk;
488 unsigned i;
489
490 if (parent->flags & CLK_PLL)
491 state = "pll";
492 else if (parent->flags & CLK_PSC)
493 state = "psc";
494 else
495 state = "";
496
497 /* <nest spaces> name <pad to end> */
498 memset(buf, ' ', sizeof(buf) - 1);
499 buf[sizeof(buf) - 1] = 0;
500 i = strlen(parent->name);
501 memcpy(buf + nest, parent->name,
502 min(i, (unsigned)(sizeof(buf) - 1 - nest)));
503
504 seq_printf(s, "%s users=%2d %-3s %9ld Hz\n",
505 buf, parent->usecount, state, clk_get_rate(parent));
506 /* REVISIT show device associations too */
507
508 /* cost is now small, but not linear... */
Sekhar Norif02bf3b2009-08-31 15:48:01 +0530509 list_for_each_entry(clk, &parent->children, childnode) {
510 dump_clock(s, nest + NEST_DELTA, clk);
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700511 }
512}
513
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100514static int davinci_ck_show(struct seq_file *m, void *v)
515{
Sekhar Norif979aa62009-12-03 15:36:51 +0530516 struct clk *clk;
517
518 /*
519 * Show clock tree; We trust nonzero usecounts equate to PSC enables...
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700520 */
521 mutex_lock(&clocks_mutex);
Sekhar Norif979aa62009-12-03 15:36:51 +0530522 list_for_each_entry(clk, &clocks, node)
523 if (!clk->parent)
524 dump_clock(m, 0, clk);
Kevin Hilmanc5b736d2009-03-20 17:29:01 -0700525 mutex_unlock(&clocks_mutex);
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100526
527 return 0;
528}
529
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100530static int davinci_ck_open(struct inode *inode, struct file *file)
531{
Sekhar Nori2f72e8d2009-12-03 15:36:52 +0530532 return single_open(file, davinci_ck_show, NULL);
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100533}
534
Sekhar Nori2f72e8d2009-12-03 15:36:52 +0530535static const struct file_operations davinci_ck_operations = {
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100536 .open = davinci_ck_open,
537 .read = seq_read,
538 .llseek = seq_lseek,
Sekhar Nori2f72e8d2009-12-03 15:36:52 +0530539 .release = single_release,
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100540};
541
Sekhar Nori2f72e8d2009-12-03 15:36:52 +0530542static int __init davinci_clk_debugfs_init(void)
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100543{
Sekhar Nori2f72e8d2009-12-03 15:36:52 +0530544 debugfs_create_file("davinci_clocks", S_IFREG | S_IRUGO, NULL, NULL,
545 &davinci_ck_operations);
Vladimir Barinov3e062b02007-06-05 16:36:55 +0100546 return 0;
547
548}
Sekhar Nori2f72e8d2009-12-03 15:36:52 +0530549device_initcall(davinci_clk_debugfs_init);
550#endif /* CONFIG_DEBUG_FS */