/*
 * SuperH clock framework
 *
 * Copyright (C) 2005 - 2010 Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *	Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "clock: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/syscore_ops.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/* clock disable operations are not passed on to hardware during boot */
static int allow_disable;

void clk_rate_table_build(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  int nr_freqs,
			  struct clk_div_mult_table *src_table,
			  unsigned long *bitmap)
{
	unsigned long mult, div;
	unsigned long freq;
	int i;

	clk->nr_freqs = nr_freqs;

	for (i = 0; i < nr_freqs; i++) {
		div = 1;
		mult = 1;

		if (src_table->divisors && i < src_table->nr_divisors)
			div = src_table->divisors[i];

		if (src_table->multipliers && i < src_table->nr_multipliers)
			mult = src_table->multipliers[i];

		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
			freq = CPUFREQ_ENTRY_INVALID;
		else
			freq = clk->parent->rate * mult / div;

		freq_table[i].index = i;
		freq_table[i].frequency = freq;
	}

	/* Termination entry */
	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;
}

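/*
 * Usage sketch (illustrative, not part of this file; div4_table and
 * NR_FREQS are hypothetical names): a CPG driver would typically size
 * its table one entry larger than nr_freqs to leave room for the
 * termination entry written above:
 *
 *	static struct cpufreq_frequency_table freqs[NR_FREQS + 1];
 *
 *	clk_rate_table_build(clk, freqs, NR_FREQS, &div4_table, NULL);
 *
 * A NULL bitmap keeps every divisor/multiplier pair; a non-NULL bitmap
 * invalidates entries whose bit is clear.
 */
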
struct clk_rate_round_data;

struct clk_rate_round_data {
	unsigned long rate;
	unsigned int min, max;
	long (*func)(unsigned int, struct clk_rate_round_data *);
	void *arg;
};

#define for_each_frequency(pos, r, freq)			\
	for (pos = r->min, freq = r->func(pos, r);		\
	     pos <= r->max; pos++, freq = r->func(pos, r))	\
		if (unlikely(freq == 0))			\
			;					\
		else

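/*
 * Iterator callbacks report 0 for table slots that should be skipped,
 * and for_each_frequency() filters those out, so the helper below only
 * ever evaluates valid candidate rates.
 */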
static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
{
	unsigned long rate_error, rate_error_prev = ~0UL;
	unsigned long highest, lowest, freq;
	long rate_best_fit = -ENOENT;
	int i;

	highest = 0;
	lowest = ~0UL;

	for_each_frequency(i, rounder, freq) {
		if (freq > highest)
			highest = freq;
		if (freq < lowest)
			lowest = freq;

		rate_error = abs(freq - rounder->rate);
		if (rate_error < rate_error_prev) {
			rate_best_fit = freq;
			rate_error_prev = rate_error;
		}

		if (rate_error == 0)
			break;
	}

	if (rounder->rate >= highest)
		rate_best_fit = highest;
	if (rounder->rate <= lowest)
		rate_best_fit = lowest;

	return rate_best_fit;
}

static long clk_rate_table_iter(unsigned int pos,
				struct clk_rate_round_data *rounder)
{
	struct cpufreq_frequency_table *freq_table = rounder->arg;
	unsigned long freq = freq_table[pos].frequency;

	if (freq == CPUFREQ_ENTRY_INVALID)
		freq = 0;

	return freq;
}

long clk_rate_table_round(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  unsigned long rate)
{
	struct clk_rate_round_data table_round = {
		.min	= 0,
		.max	= clk->nr_freqs - 1,
		.func	= clk_rate_table_iter,
		.arg	= freq_table,
		.rate	= rate,
	};

	if (clk->nr_freqs < 1)
		return -ENOSYS;

	return clk_rate_round_helper(&table_round);
}

static long clk_rate_div_range_iter(unsigned int pos,
				    struct clk_rate_round_data *rounder)
{
	return clk_get_rate(rounder->arg) / pos;
}

long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
			      unsigned int div_max, unsigned long rate)
{
	struct clk_rate_round_data div_range_round = {
		.min	= div_min,
		.max	= div_max,
		.func	= clk_rate_div_range_iter,
		.arg	= clk_get_parent(clk),
		.rate	= rate,
	};

	return clk_rate_round_helper(&div_range_round);
}

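/*
 * Worked example (illustrative): with a 48 MHz parent and divisors
 * 1..4, the candidates are 48, 24, 16 and 12 MHz. Rounding 20 MHz
 * yields 24 MHz: 24 and 16 MHz both miss by 4 MHz, but the helper only
 * replaces the best fit on a strictly smaller error, so the first
 * candidate scanned wins the tie.
 */
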
static long clk_rate_mult_range_iter(unsigned int pos,
				     struct clk_rate_round_data *rounder)
{
	return clk_get_rate(rounder->arg) * pos;
}

long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min,
			       unsigned int mult_max, unsigned long rate)
{
	struct clk_rate_round_data mult_range_round = {
		.min	= mult_min,
		.max	= mult_max,
		.func	= clk_rate_mult_range_iter,
		.arg	= clk_get_parent(clk),
		.rate	= rate,
	};

	return clk_rate_round_helper(&mult_range_round);
}

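/*
 * Illustrative: with a 2 MHz parent and multipliers 1..4 the candidates
 * are 2, 4, 6 and 8 MHz. A request for 10 MHz lies above the highest
 * candidate, so clk_rate_round_helper() clamps the result to 8 MHz;
 * requests below the lowest candidate clamp upward the same way.
 */
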
int clk_rate_table_find(struct clk *clk,
			struct cpufreq_frequency_table *freq_table,
			unsigned long rate)
{
	int i;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned long freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		if (freq == rate)
			return i;
	}

	return -ENOENT;
}

/* Used for clocks that always have the same rate as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent ? clk->parent->rate : 0;
}

int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	return 0;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);

		propagate_rate(clkp);
	}
}

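/*
 * Enable/disable are refcounted: the hardware is only touched on the
 * 0 <-> 1 usecount transitions, and disables are additionally gated on
 * allow_disable, which clk_late_init() sets once boot has completed.
 */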
static void __clk_disable(struct clk *clk)
{
	if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
		 clk))
		return;

	if (!(--clk->usecount)) {
		if (likely(allow_disable && clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
		if (likely(clk->parent))
			__clk_disable(clk->parent);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
	}

	return ret;
err:
	clk->usecount--;
	return ret;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

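/*
 * Typical consumer usage (illustrative sketch; "module_clk" is an
 * example name, and clk_get()/clk_put() live in the clkdev layer
 * rather than in this file):
 *
 *	struct clk *clk = clk_get(dev, "module_clk");
 *	if (!IS_ERR(clk)) {
 *		clk_enable(clk);
 *		...
 *		clk_disable(clk);
 *		clk_put(clk);
 *	}
 */
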
static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent) and, provided
 * each clock's .recalc is set correctly, propagates the new rates to
 * their children. Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}

static struct clk_mapping dummy_mapping;

static struct clk *lookup_root_clock(struct clk *clk)
{
	while (clk->parent)
		clk = clk->parent;

	return clk;
}

static int clk_establish_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/*
	 * Propagate mappings.
	 */
	if (!mapping) {
		struct clk *clkp;

		/*
		 * dummy mapping for root clocks with no specified ranges
		 */
		if (!clk->parent) {
			clk->mapping = &dummy_mapping;
			return 0;
		}

		/*
		 * If we're on a child clock and it provides no mapping of its
		 * own, inherit the mapping from its root clock.
		 */
		clkp = lookup_root_clock(clk);
		mapping = clkp->mapping;
		BUG_ON(!mapping);
	}

	/*
	 * Establish initial mapping.
	 */
	if (!mapping->base && mapping->phys) {
		kref_init(&mapping->ref);

		mapping->base = ioremap_nocache(mapping->phys, mapping->len);
		if (unlikely(!mapping->base))
			return -ENXIO;
	} else if (mapping->base) {
		/*
		 * Bump the refcount for an existing mapping
		 */
		kref_get(&mapping->ref);
	}

	clk->mapping = mapping;
	return 0;
}

static void clk_destroy_mapping(struct kref *kref)
{
	struct clk_mapping *mapping;

	mapping = container_of(kref, struct clk_mapping, ref);

	iounmap(mapping->base);
}

static void clk_teardown_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/* Nothing to do */
	if (mapping == &dummy_mapping)
		return;

	kref_put(&mapping->ref, clk_destroy_mapping);
	clk->mapping = NULL;
}

int clk_register(struct clk *clk)
{
	int ret;

	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clock_list_sem);

	INIT_LIST_HEAD(&clk->children);
	clk->usecount = 0;

	ret = clk_establish_mapping(clk);
	if (unlikely(ret))
		goto out_unlock;

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clock_list);

#ifdef CONFIG_SH_CLK_CPG_LEGACY
	if (clk->ops && clk->ops->init)
		clk->ops->init(clk);
#endif

out_unlock:
	mutex_unlock(&clock_list_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_register);

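/*
 * Registration sketch (illustrative; my_clk, extal_clk and my_clk_ops
 * are made-up names):
 *
 *	static struct clk my_clk = {
 *		.parent	= &extal_clk,
 *		.ops	= &my_clk_ops,
 *	};
 *
 *	ret = clk_register(&my_clk);
 *
 * Calling clk_register() twice on the same clock is harmless: already
 * registered clocks are detected via their list node and return 0.
 */
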
void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);
	clk_teardown_mapping(clk);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node)
		if (clkp->flags & CLK_ENABLE_ON_INIT)
			clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EOPNOTSUPP;
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);

	if (likely(clk->ops && clk->ops->set_rate)) {
		ret = clk->ops->set_rate(clk, rate);
		if (ret != 0)
			goto out_unlock;
	} else {
		clk->rate = rate;
		ret = 0;
	}

	if (clk->ops && clk->ops->recalc)
		clk->rate = clk->ops->recalc(clk);

	propagate_rate(clk);

out_unlock:
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (!parent || !clk)
		return ret;
	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clock_lock, flags);
	if (clk->usecount == 0) {
		if (clk->ops->set_parent)
			ret = clk->ops->set_parent(clk, parent);
		else
			ret = clk_reparent(clk, parent);

		if (ret == 0) {
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			pr_debug("set parent of %p to %p (new rate %ld)\n",
				 clk, clk->parent, clk->rate);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);

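/*
 * clk_round_parent() scans the parent's frequency table for the entry
 * that, divided by some divisor in [div_min, div_max], lands closest
 * to @target. The residual error is returned, and the chosen child and
 * parent rates are reported through @best_freq and @parent_freq.
 */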
long clk_round_parent(struct clk *clk, unsigned long target,
		      unsigned long *best_freq, unsigned long *parent_freq,
		      unsigned int div_min, unsigned int div_max)
{
	struct cpufreq_frequency_table *freq, *best = NULL;
	unsigned long error = ULONG_MAX, freq_high, freq_low, div;
	struct clk *parent = clk_get_parent(clk);

	if (!parent) {
		*parent_freq = 0;
		*best_freq = clk_round_rate(clk, target);
		return abs(target - *best_freq);
	}

	for (freq = parent->freq_table; freq->frequency != CPUFREQ_TABLE_END;
	     freq++) {
		if (freq->frequency == CPUFREQ_ENTRY_INVALID)
			continue;

		if (unlikely(freq->frequency / target <= div_min - 1)) {
			unsigned long freq_max;

			freq_max = (freq->frequency + div_min / 2) / div_min;
			if (error > target - freq_max) {
				error = target - freq_max;
				best = freq;
				if (best_freq)
					*best_freq = freq_max;
			}

			pr_debug("too low freq %u, error %lu\n", freq->frequency,
				 target - freq_max);

			if (!error)
				break;

			continue;
		}

		if (unlikely(freq->frequency / target >= div_max)) {
			unsigned long freq_min;

			freq_min = (freq->frequency + div_max / 2) / div_max;
			if (error > freq_min - target) {
				error = freq_min - target;
				best = freq;
				if (best_freq)
					*best_freq = freq_min;
			}

			pr_debug("too high freq %u, error %lu\n", freq->frequency,
				 freq_min - target);

			if (!error)
				break;

			continue;
		}

		div = freq->frequency / target;
		freq_high = freq->frequency / div;
		freq_low = freq->frequency / (div + 1);

		if (freq_high - target < error) {
			error = freq_high - target;
			best = freq;
			if (best_freq)
				*best_freq = freq_high;
		}

		if (target - freq_low < error) {
			error = target - freq_low;
			best = freq;
			if (best_freq)
				*best_freq = freq_low;
		}

		pr_debug("%u / %lu = %lu, / %lu = %lu, best %lu, parent %u\n",
			 freq->frequency, div, freq_high, div + 1, freq_low,
			 *best_freq, best->frequency);

		if (!error)
			break;
	}

	if (parent_freq)
		*parent_freq = best->frequency;

	return error;
}
EXPORT_SYMBOL_GPL(clk_round_parent);

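/*
 * On resume, every clock that is still in use is reprogrammed from its
 * cached state: the parent is restored first, then the rate (falling
 * back to ->recalc when no ->set_rate op exists).
 */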
#ifdef CONFIG_PM
static void clks_core_resume(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node) {
		if (likely(clkp->usecount && clkp->ops)) {
			unsigned long rate = clkp->rate;

			if (likely(clkp->ops->set_parent))
				clkp->ops->set_parent(clkp,
					clkp->parent);
			if (likely(clkp->ops->set_rate))
				clkp->ops->set_rate(clkp, rate);
			else if (likely(clkp->ops->recalc))
				clkp->rate = clkp->ops->recalc(clkp);
		}
	}
}

static struct syscore_ops clks_syscore_ops = {
	.resume = clks_core_resume,
};

static int __init clk_syscore_init(void)
{
	register_syscore_ops(&clks_syscore_ops);

	return 0;
}
subsys_initcall(clk_syscore_init);
#endif

static int __init clk_late_init(void)
{
	unsigned long flags;
	struct clk *clk;

	/* disable all clocks with zero use count */
	mutex_lock(&clock_list_sem);
	spin_lock_irqsave(&clock_lock, flags);

	list_for_each_entry(clk, &clock_list, node)
		if (!clk->usecount && clk->ops && clk->ops->disable)
			clk->ops->disable(clk);

	/* from now on allow clock disable operations */
	allow_disable = 1;

	spin_unlock_irqrestore(&clock_lock, flags);
	mutex_unlock(&clock_list_sem);
	return 0;
}
late_initcall(clk_late_init);