/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/*** debugfs support ***/

#ifdef CONFIG_COMMON_CLK_DEBUG
#include <linux/debugfs.h>

static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;

static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
{
	if (!c)
		return;

	seq_printf(s, "%*s%-*s %-11d %-12d %-10lu",
		   level * 3 + 1, "",
		   30 - level * 3, c->name,
		   c->enable_count, c->prepare_count, c->rate);
	seq_printf(s, "\n");
}

static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
				     int level)
{
	struct clk *child;

	if (!c)
		return;

	clk_summary_show_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node)
		clk_summary_show_subtree(s, child, level + 1);
}

static int clk_summary_show(struct seq_file *s, void *data)
{
	struct clk *c;

	seq_printf(s, "   clock                        enable_cnt  prepare_cnt  rate\n");
	seq_printf(s, "---------------------------------------------------------------------\n");

	mutex_lock(&prepare_lock);

	hlist_for_each_entry(c, &clk_root_list, child_node)
		clk_summary_show_subtree(s, c, 0);

	hlist_for_each_entry(c, &clk_orphan_list, child_node)
		clk_summary_show_subtree(s, c, 0);

	mutex_unlock(&prepare_lock);

	return 0;
}

static int clk_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_summary_show, inode->i_private);
}

static const struct file_operations clk_summary_fops = {
	.open		= clk_summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
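
/*
 * Illustrative only (not part of the original file): with debugfs mounted
 * at /sys/kernel/debug, reading clk_summary prints one line per clock,
 * indented three spaces per level of the tree.  The clock names and rates
 * below are hypothetical:
 *
 *	$ cat /sys/kernel/debug/clk/clk_summary
 *	   clock                        enable_cnt  prepare_cnt  rate
 *	---------------------------------------------------------------------
 *	 osc                           1           1            24000000
 *	    pll1                       1           1            792000000
 *	       uart1_clk               0           1            66000000
 */
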
97
Prashant Gaikwadbddca892012-12-26 19:16:23 +053098static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
99{
100 if (!c)
101 return;
102
103 seq_printf(s, "\"%s\": { ", c->name);
104 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
105 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
106 seq_printf(s, "\"rate\": %lu", c->rate);
107}
108
109static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
110{
111 struct clk *child;
Prashant Gaikwadbddca892012-12-26 19:16:23 +0530112
113 if (!c)
114 return;
115
116 clk_dump_one(s, c, level);
117
Sasha Levinb67bfe02013-02-27 17:06:00 -0800118 hlist_for_each_entry(child, &c->children, child_node) {
Prashant Gaikwadbddca892012-12-26 19:16:23 +0530119 seq_printf(s, ",");
120 clk_dump_subtree(s, child, level + 1);
121 }
122
123 seq_printf(s, "}");
124}
125
126static int clk_dump(struct seq_file *s, void *data)
127{
128 struct clk *c;
Prashant Gaikwadbddca892012-12-26 19:16:23 +0530129 bool first_node = true;
130
131 seq_printf(s, "{");
132
133 mutex_lock(&prepare_lock);
134
Sasha Levinb67bfe02013-02-27 17:06:00 -0800135 hlist_for_each_entry(c, &clk_root_list, child_node) {
Prashant Gaikwadbddca892012-12-26 19:16:23 +0530136 if (!first_node)
137 seq_printf(s, ",");
138 first_node = false;
139 clk_dump_subtree(s, c, 0);
140 }
141
Sasha Levinb67bfe02013-02-27 17:06:00 -0800142 hlist_for_each_entry(c, &clk_orphan_list, child_node) {
Prashant Gaikwadbddca892012-12-26 19:16:23 +0530143 seq_printf(s, ",");
144 clk_dump_subtree(s, c, 0);
145 }
146
147 mutex_unlock(&prepare_lock);
148
149 seq_printf(s, "}");
150 return 0;
151}
152
153
154static int clk_dump_open(struct inode *inode, struct file *file)
155{
156 return single_open(file, clk_dump, inode->i_private);
157}
158
159static const struct file_operations clk_dump_fops = {
160 .open = clk_dump_open,
161 .read = seq_read,
162 .llseek = seq_lseek,
163 .release = single_release,
164};
165
Mike Turquetteb24764902012-03-15 23:11:19 -0700166/* caller must hold prepare_lock */
167static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
168{
169 struct dentry *d;
170 int ret = -ENOMEM;
171
172 if (!clk || !pdentry) {
173 ret = -EINVAL;
174 goto out;
175 }
176
177 d = debugfs_create_dir(clk->name, pdentry);
178 if (!d)
179 goto out;
180
181 clk->dentry = d;
182
183 d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
184 (u32 *)&clk->rate);
185 if (!d)
186 goto err_out;
187
188 d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
189 (u32 *)&clk->flags);
190 if (!d)
191 goto err_out;
192
193 d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
194 (u32 *)&clk->prepare_count);
195 if (!d)
196 goto err_out;
197
198 d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
199 (u32 *)&clk->enable_count);
200 if (!d)
201 goto err_out;
202
203 d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
204 (u32 *)&clk->notifier_count);
205 if (!d)
206 goto err_out;
207
208 ret = 0;
209 goto out;
210
211err_out:
212 debugfs_remove(clk->dentry);
213out:
214 return ret;
215}
216
217/* caller must hold prepare_lock */
218static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
219{
220 struct clk *child;
Mike Turquetteb24764902012-03-15 23:11:19 -0700221 int ret = -EINVAL;;
222
223 if (!clk || !pdentry)
224 goto out;
225
226 ret = clk_debug_create_one(clk, pdentry);
227
228 if (ret)
229 goto out;
230
Sasha Levinb67bfe02013-02-27 17:06:00 -0800231 hlist_for_each_entry(child, &clk->children, child_node)
Mike Turquetteb24764902012-03-15 23:11:19 -0700232 clk_debug_create_subtree(child, clk->dentry);
233
234 ret = 0;
235out:
236 return ret;
237}
238
239/**
240 * clk_debug_register - add a clk node to the debugfs clk tree
241 * @clk: the clk being added to the debugfs clk tree
242 *
243 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
244 * initialized. Otherwise it bails out early since the debugfs clk tree
245 * will be created lazily by clk_debug_init as part of a late_initcall.
246 *
247 * Caller must hold prepare_lock. Only clk_init calls this function (so
248 * far) so this is taken care.
249 */
250static int clk_debug_register(struct clk *clk)
251{
252 struct clk *parent;
253 struct dentry *pdentry;
254 int ret = 0;
255
256 if (!inited)
257 goto out;
258
259 parent = clk->parent;
260
261 /*
262 * Check to see if a clk is a root clk. Also check that it is
263 * safe to add this clk to debugfs
264 */
265 if (!parent)
266 if (clk->flags & CLK_IS_ROOT)
267 pdentry = rootdir;
268 else
269 pdentry = orphandir;
270 else
271 if (parent->dentry)
272 pdentry = parent->dentry;
273 else
274 goto out;
275
276 ret = clk_debug_create_subtree(clk, pdentry);
277
278out:
279 return ret;
280}
281
282/**
283 * clk_debug_init - lazily create the debugfs clk tree visualization
284 *
285 * clks are often initialized very early during boot before memory can
286 * be dynamically allocated and well before debugfs is setup.
287 * clk_debug_init walks the clk tree hierarchy while holding
288 * prepare_lock and creates the topology as part of a late_initcall,
289 * thus insuring that clks initialized very early will still be
290 * represented in the debugfs clk tree. This function should only be
291 * called once at boot-time, and all other clks added dynamically will
292 * be done so with clk_debug_register.
293 */
294static int __init clk_debug_init(void)
295{
296 struct clk *clk;
Prashant Gaikwad1af599d2012-12-26 19:16:22 +0530297 struct dentry *d;
Mike Turquetteb24764902012-03-15 23:11:19 -0700298
299 rootdir = debugfs_create_dir("clk", NULL);
300
301 if (!rootdir)
302 return -ENOMEM;
303
Prashant Gaikwad1af599d2012-12-26 19:16:22 +0530304 d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
305 &clk_summary_fops);
306 if (!d)
307 return -ENOMEM;
308
Prashant Gaikwadbddca892012-12-26 19:16:23 +0530309 d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, NULL,
310 &clk_dump_fops);
311 if (!d)
312 return -ENOMEM;
313
Mike Turquetteb24764902012-03-15 23:11:19 -0700314 orphandir = debugfs_create_dir("orphans", rootdir);
315
316 if (!orphandir)
317 return -ENOMEM;
318
319 mutex_lock(&prepare_lock);
320
Sasha Levinb67bfe02013-02-27 17:06:00 -0800321 hlist_for_each_entry(clk, &clk_root_list, child_node)
Mike Turquetteb24764902012-03-15 23:11:19 -0700322 clk_debug_create_subtree(clk, rootdir);
323
Sasha Levinb67bfe02013-02-27 17:06:00 -0800324 hlist_for_each_entry(clk, &clk_orphan_list, child_node)
Mike Turquetteb24764902012-03-15 23:11:19 -0700325 clk_debug_create_subtree(clk, orphandir);
326
327 inited = 1;
328
329 mutex_unlock(&prepare_lock);
330
331 return 0;
332}
333late_initcall(clk_debug_init);
334#else
335static inline int clk_debug_register(struct clk *clk) { return 0; }
Mike Turquette70d347e2012-03-26 11:53:47 -0700336#endif
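
/*
 * Illustrative only: once clk_debug_init has run, the debugfs hierarchy
 * mirrors the clk topology.  With a hypothetical "pll1" clock parented
 * to a root "osc" clock, one would expect roughly:
 *
 *	/sys/kernel/debug/clk/
 *		clk_summary
 *		clk_dump
 *		orphans/
 *		osc/
 *			clk_rate, clk_flags, clk_prepare_count, ...
 *			pll1/
 *				clk_rate, ...
 */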

/* caller must hold prepare_lock */
static void clk_unprepare_unused_subtree(struct clk *clk)
{
	struct clk *child;

	if (!clk)
		return;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (clk->prepare_count)
		return;

	if (clk->flags & CLK_IGNORE_UNUSED)
		return;

	if (__clk_is_prepared(clk))
		if (clk->ops->unprepare)
			clk->ops->unprepare(clk->hw);
}

/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
	struct clk *child;
	unsigned long flags;

	if (!clk)
		goto out;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_disable_unused_subtree(child);

	spin_lock_irqsave(&enable_lock, flags);

	if (clk->enable_count)
		goto unlock_out;

	if (clk->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence.  call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (__clk_is_enabled(clk)) {
		if (clk->ops->disable_unused)
			clk->ops->disable_unused(clk->hw);
		else if (clk->ops->disable)
			clk->ops->disable(clk->hw);
	}

unlock_out:
	spin_unlock_irqrestore(&enable_lock, flags);

out:
	return;
}

static int clk_disable_unused(void)
{
	struct clk *clk;

	mutex_lock(&prepare_lock);

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(clk);

	mutex_unlock(&prepare_lock);

	return 0;
}
late_initcall(clk_disable_unused);
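
/*
 * Illustrative sketch (not part of the original file): a platform that
 * must keep an otherwise-unused clock running past this late_initcall
 * sweep can opt out of it with CLK_IGNORE_UNUSED.  The "dbg_clk" name
 * and its ops are hypothetical:
 *
 *	struct clk_init_data init = {
 *		.name	= "dbg_clk",
 *		.ops	= &dbg_clk_ops,
 *		.flags	= CLK_IGNORE_UNUSED,
 *	};
 */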

/*** helper functions ***/

const char *__clk_get_name(struct clk *clk)
{
	return !clk ? NULL : clk->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->hw;
}

u8 __clk_get_num_parents(struct clk *clk)
{
	return !clk ? 0 : clk->num_parents;
}

struct clk *__clk_get_parent(struct clk *clk)
{
	return !clk ? NULL : clk->parent;
}

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->enable_count;
}

unsigned int __clk_get_prepare_count(struct clk *clk)
{
	return !clk ? 0 : clk->prepare_count;
}

unsigned long __clk_get_rate(struct clk *clk)
{
	unsigned long ret;

	if (!clk) {
		ret = 0;
		goto out;
	}

	ret = clk->rate;

	if (clk->flags & CLK_IS_ROOT)
		goto out;

	if (!clk->parent)
		ret = 0;

out:
	return ret;
}

unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->flags;
}

bool __clk_is_prepared(struct clk *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_prepared is optional for clocks that can prepare;
	 * fall back to the software usage counter if it is missing
	 */
	if (!clk->ops->is_prepared) {
		ret = clk->prepare_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_prepared(clk->hw);
out:
	return !!ret;
}

bool __clk_is_enabled(struct clk *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to the software usage counter if .is_enabled is missing
	 */
	if (!clk->ops->is_enabled) {
		ret = clk->enable_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_enabled(clk->hw);
out:
	return !!ret;
}

static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
	struct clk *child;
	struct clk *ret;

	if (!strcmp(clk->name, name))
		return clk;

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

struct clk *__clk_lookup(const char *name)
{
	struct clk *root_clk;
	struct clk *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

/*** clk api ***/

void __clk_unprepare(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->prepare_count == 0))
		return;

	if (--clk->prepare_count > 0)
		return;

	WARN_ON(clk->enable_count > 0);

	if (clk->ops->unprepare)
		clk->ops->unprepare(clk->hw);

	__clk_unprepare(clk->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a
 * slow part.  It is for this reason that clk_unprepare and clk_disable are
 * not mutually exclusive.  In fact clk_disable must be called before
 * clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	mutex_lock(&prepare_lock);
	__clk_unprepare(clk);
	mutex_unlock(&prepare_lock);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

int __clk_prepare(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (clk->prepare_count == 0) {
		ret = __clk_prepare(clk->parent);
		if (ret)
			return ret;

		if (clk->ops->prepare) {
			ret = clk->ops->prepare(clk->hw);
			if (ret) {
				__clk_unprepare(clk->parent);
				return ret;
			}
		}
	}

	clk->prepare_count++;

	return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a
 * simple case, clk_prepare can be used instead of clk_enable to ungate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk ungate operation may require a fast and a
 * slow part.  It is for this reason that clk_prepare and clk_enable are not
 * mutually exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	mutex_lock(&prepare_lock);
	ret = __clk_prepare(clk);
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(IS_ERR(clk)))
		return;

	if (WARN_ON(clk->enable_count == 0))
		return;

	if (--clk->enable_count > 0)
		return;

	if (clk->ops->disable)
		clk->ops->disable(clk->hw);

	__clk_disable(clk->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It
 * is for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&enable_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&enable_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (WARN_ON(clk->prepare_count == 0))
		return -ESHUTDOWN;

	if (clk->enable_count == 0) {
		ret = __clk_enable(clk->parent);

		if (ret)
			return ret;

		if (clk->ops->enable) {
			ret = clk->ops->enable(clk->hw);
			if (ret) {
				__clk_disable(clk->parent);
				return ret;
			}
		}
	}

	clk->enable_count++;
	return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&enable_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&enable_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
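
/*
 * Illustrative sketch: a typical consumer pairs the sleepable and atomic
 * halves of the API in this order (the "uart" name and dev are
 * hypothetical):
 *
 *	struct clk *clk = clk_get(dev, "uart");
 *	ret = clk_prepare(clk);		// may sleep; non-atomic context
 *	if (!ret)
 *		ret = clk_enable(clk);	// never sleeps; may be atomic
 *	...
 *	clk_disable(clk);		// reverse order on the way down
 *	clk_unprepare(clk);
 */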

/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long parent_rate = 0;

	if (!clk)
		return 0;

	if (!clk->ops->round_rate) {
		if (clk->flags & CLK_SET_RATE_PARENT)
			return __clk_round_rate(clk->parent, rate);
		else
			return clk->rate;
	}

	if (clk->parent)
		parent_rate = clk->parent->rate;

	return clk->ops->round_rate(clk->hw, rate, &parent_rate);
}

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support the round_rate
 * operation then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long ret;

	mutex_lock(&prepare_lock);
	ret = __clk_round_rate(clk, rate);
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
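
/*
 * Illustrative sketch: clk_round_rate lets a driver discover what
 * clk_set_rate would actually deliver, without touching the hardware:
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *	if (rounded == 48000000)
 *		ret = clk_set_rate(clk, 48000000);	// exact rate achievable
 */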

/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk *clk, unsigned long msg,
		unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.clk = clk;
	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk == clk) {
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
			break;
		}
	}

	return ret;
}

/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk *child;

	old_rate = clk->rate;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		clk->rate = parent_rate;

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (clk->notifier_count && msg)
		__clk_notify(clk, msg, old_rate, clk->rate);

	hlist_for_each_entry(child, &clk->children, child_node)
		__clk_recalc_rates(child, msg);
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless the CLK_GET_RATE_NOCACHE
 * flag is set, in which case a recalc_rate will be issued first.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	mutex_lock(&prepare_lock);

	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(clk, 0);

	rate = __clk_get_rate(clk);
	mutex_unlock(&prepare_lock);

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
	struct clk *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	if (clk->ops->recalc_rate)
		new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		new_rate = parent_rate;

	/* abort the rate change if a driver returns NOTIFY_BAD */
	if (clk->notifier_count)
		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

	if (ret == NOTIFY_BAD)
		goto out;

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret == NOTIFY_BAD)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
{
	struct clk *child;

	clk->new_rate = new_rate;

	hlist_for_each_entry(child, &clk->children, child_node) {
		if (child->ops->recalc_rate)
			child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
		else
			child->new_rate = new_rate;
		clk_calc_subtree(child, child->new_rate);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
	struct clk *top = clk;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;

	/* sanity */
	if (IS_ERR_OR_NULL(clk))
		return NULL;

	/* save parent rate, if it exists */
	if (clk->parent)
		best_parent_rate = clk->parent->rate;

	/* never propagate up to the parent */
	if (!(clk->flags & CLK_SET_RATE_PARENT)) {
		if (!clk->ops->round_rate) {
			clk->new_rate = clk->rate;
			return NULL;
		}
		new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
		goto out;
	}

	/* need clk->parent from here on out */
	if (!clk->parent) {
		pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
		return NULL;
	}

	if (!clk->ops->round_rate) {
		top = clk_calc_new_rates(clk->parent, rate);
		new_rate = clk->parent->new_rate;

		goto out;
	}

	new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);

	if (best_parent_rate != clk->parent->rate) {
		top = clk_calc_new_rates(clk->parent, best_parent_rate);

		goto out;
	}

out:
	clk_calc_subtree(clk, new_rate);

	return top;
}

/*
 * Notify about rate changes in a subtree.  Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
	struct clk *child, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (clk->rate == clk->new_rate)
		return NULL;

	if (clk->notifier_count) {
		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
		if (ret == NOTIFY_BAD)
			fail_clk = clk;
	}

	hlist_for_each_entry(child, &clk->children, child_node) {
		clk = clk_propagate_rate_change(child, event);
		if (clk)
			fail_clk = clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
	struct clk *child;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;

	old_rate = clk->rate;

	if (clk->parent)
		best_parent_rate = clk->parent->rate;

	if (clk->ops->set_rate)
		clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
	else
		clk->rate = best_parent_rate;

	if (clk->notifier_count && old_rate != clk->rate)
		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_change_rate(child);
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk *top, *fail_clk;
	int ret = 0;

	/* prevent racing with updates to the clock topology */
	mutex_lock(&prepare_lock);

	/* bail early if nothing to do */
	if (rate == clk->rate)
		goto out;

	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(clk, rate);
	if (!top) {
		ret = -EINVAL;
		goto out;
	}

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_warn("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto out;
	}

	/* change the rates */
	clk_change_rate(top);

out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
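
/*
 * Illustrative sketch: a divider that sets CLK_SET_RATE_PARENT allows a
 * request on the child to be satisfied by re-rating its parent.  The
 * "div_clk" name is hypothetical:
 *
 *	ret = clk_set_rate(div_clk, 100000000);
 *
 * If div_clk's .round_rate writes a new value through *parent_rate, then
 * clk_calc_new_rates recurses into the parent as well, and the change is
 * applied by clk_change_rate from the topmost affected clock downward.
 */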

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	mutex_lock(&prepare_lock);
	parent = __clk_get_parent(clk);
	mutex_unlock(&prepare_lock);

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
 */
static struct clk *__clk_init_parent(struct clk *clk)
{
	struct clk *ret = NULL;
	u8 index;

	/* handle the trivial cases */

	if (!clk->num_parents)
		goto out;

	if (clk->num_parents == 1) {
		if (IS_ERR_OR_NULL(clk->parent))
			ret = clk->parent = __clk_lookup(clk->parent_names[0]);
		ret = clk->parent;
		goto out;
	}

	if (!clk->ops->get_parent) {
		WARN(!clk->ops->get_parent,
			"%s: multi-parent clocks must implement .get_parent\n",
			__func__);
		goto out;
	}

	/*
	 * Do our best to cache parent clocks in clk->parents.  This prevents
	 * unnecessary and expensive calls to __clk_lookup.  We don't set
	 * clk->parent here; that is done by the calling function
	 */

	index = clk->ops->get_parent(clk->hw);

	if (!clk->parents)
		clk->parents =
			kzalloc((sizeof(struct clk *) * clk->num_parents),
					GFP_KERNEL);

	if (!clk->parents)
		ret = __clk_lookup(clk->parent_names[index]);
	else if (!clk->parents[index])
		ret = clk->parents[index] =
			__clk_lookup(clk->parent_names[index]);
	else
		ret = clk->parents[index];

out:
	return ret;
}

void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
#ifdef CONFIG_COMMON_CLK_DEBUG
	struct dentry *d;
	struct dentry *new_parent_d;
#endif

	if (!clk || !new_parent)
		return;

	hlist_del(&clk->child_node);

	if (new_parent)
		hlist_add_head(&clk->child_node, &new_parent->children);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

#ifdef CONFIG_COMMON_CLK_DEBUG
	if (!inited)
		goto out;

	if (new_parent)
		new_parent_d = new_parent->dentry;
	else
		new_parent_d = orphandir;

	d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
			new_parent_d, clk->name);
	if (d)
		clk->dentry = d;
	else
		pr_debug("%s: failed to rename debugfs entry for %s\n",
				__func__, clk->name);
out:
#endif

	clk->parent = new_parent;

	__clk_recalc_rates(clk, POST_RATE_CHANGE);
}

static int __clk_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk *old_parent;
	unsigned long flags;
	int ret = -EINVAL;
	u8 i;

	old_parent = clk->parent;

	if (!clk->parents)
		clk->parents = kzalloc((sizeof(struct clk *) * clk->num_parents),
				GFP_KERNEL);

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to __clk_lookup.
	 */
	for (i = 0; i < clk->num_parents; i++) {
		if (clk->parents && clk->parents[i] == parent)
			break;
		else if (!strcmp(clk->parent_names[i], parent->name)) {
			if (clk->parents)
				clk->parents[i] = __clk_lookup(parent->name);
			break;
		}
	}

	if (i == clk->num_parents) {
		pr_debug("%s: clock %s is not a possible parent of clock %s\n",
				__func__, parent->name, clk->name);
		goto out;
	}

	/* migrate prepare and enable */
	if (clk->prepare_count)
		__clk_prepare(parent);

	/* FIXME replace with clk_is_enabled(clk) someday */
	spin_lock_irqsave(&enable_lock, flags);
	if (clk->enable_count)
		__clk_enable(parent);
	spin_unlock_irqrestore(&enable_lock, flags);

	/* change clock input source */
	ret = clk->ops->set_parent(clk->hw, i);

	/* clean up old prepare and enable */
	spin_lock_irqsave(&enable_lock, flags);
	if (clk->enable_count)
		__clk_disable(old_parent);
	spin_unlock_irqrestore(&enable_lock, flags);

	if (clk->prepare_count)
		__clk_unprepare(old_parent);

out:
	return ret;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk has the
 * CLK_SET_PARENT_GATE flag set then clk must be gated for this
 * operation to succeed.  After successfully changing clk's parent
 * clk_set_parent will update the clk topology, sysfs topology and
 * propagate rate recalculation via __clk_recalc_rates.  Returns 0 on
 * success, -EERROR otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = 0;

	if (!clk || !clk->ops)
		return -EINVAL;

	if (!clk->ops->set_parent)
		return -ENOSYS;

	/* prevent racing with updates to the clock topology */
	mutex_lock(&prepare_lock);

	if (clk->parent == parent)
		goto out;

	/* propagate PRE_RATE_CHANGE notifications */
	if (clk->notifier_count)
		ret = __clk_speculate_rates(clk, parent->rate);

	/* abort if a driver objects */
	if (ret == NOTIFY_STOP)
		goto out;

	/* only re-parent if the clock is not in use */
	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
		ret = -EBUSY;
	else
		ret = __clk_set_parent(clk, parent);

	/* propagate ABORT_RATE_CHANGE if .set_parent failed */
	if (ret) {
		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
		goto out;
	}

	/* propagate rate recalculation downstream */
	__clk_reparent(clk, parent);

out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
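
/*
 * Illustrative sketch: switching a mux input.  The "mux" and "pll2"
 * names and dev are hypothetical; the disable/unprepare pair is only
 * required when the mux sets CLK_SET_PARENT_GATE:
 *
 *	struct clk *mux = clk_get(dev, "mux");
 *	struct clk *pll2 = clk_get(dev, "pll2");
 *
 *	clk_disable(mux);
 *	clk_unprepare(mux);	// prepare_count must reach zero
 *	ret = clk_set_parent(mux, pll2);
 */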

/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev: device initializing this clk, placeholder for now
 * @clk: clk being initialized
 *
 * Initializes the lists in struct clk, queries the hardware for the
 * parent and rate and sets them both.
 */
int __clk_init(struct device *dev, struct clk *clk)
{
	int i, ret = 0;
	struct clk *orphan;
	struct hlist_node *tmp2;

	if (!clk)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	/* check to see if a clock with this name is already registered */
	if (__clk_lookup(clk->name)) {
		pr_debug("%s: clk %s already initialized\n",
				__func__, clk->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane.  See Documentation/clk.txt */
	if (clk->ops->set_rate &&
			!(clk->ops->round_rate && clk->ops->recalc_rate)) {
		pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	if (clk->ops->set_parent && !clk->ops->get_parent) {
		pr_warning("%s: %s must implement .get_parent & .set_parent\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < clk->num_parents; i++)
		WARN(!clk->parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, clk->name);

	/*
	 * Allocate an array of struct clk *'s to avoid unnecessary string
	 * look-ups of clk's possible parents.  This can fail for clocks passed
	 * in to clk_init during early boot; thus any access to clk->parents[]
	 * must always check for a NULL pointer and try to populate it if
	 * necessary.
	 *
	 * If clk->parents is not NULL we skip this entire block.  This allows
	 * for clock drivers to statically initialize clk->parents.
	 */
	if (clk->num_parents > 1 && !clk->parents) {
		clk->parents = kzalloc((sizeof(struct clk *) * clk->num_parents),
				GFP_KERNEL);
		/*
		 * __clk_lookup returns NULL for parents that have not been
		 * clk_init'd; thus any access to clk->parents[] must check
		 * for a NULL pointer.  We can always perform lazy lookups for
		 * missing parents later on.
		 */
		if (clk->parents)
			for (i = 0; i < clk->num_parents; i++)
				clk->parents[i] =
					__clk_lookup(clk->parent_names[i]);
	}

	clk->parent = __clk_init_parent(clk);

	/*
	 * Populate clk->parent if parent has already been __clk_init'd.  If
	 * parent has not yet been __clk_init'd then place clk in the orphan
	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (clk->parent)
		hlist_add_head(&clk->child_node,
				&clk->parent->children);
	else if (clk->flags & CLK_IS_ROOT)
		hlist_add_head(&clk->child_node, &clk_root_list);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw,
				__clk_get_rate(clk->parent));
	else if (clk->parent)
		clk->rate = clk->parent->rate;
	else
		clk->rate = 0;

	/*
	 * walk the list of orphan clocks and reparent any that are children of
	 * this clock
	 */
	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
		if (orphan->ops->get_parent) {
			i = orphan->ops->get_parent(orphan->hw);
			if (!strcmp(clk->name, orphan->parent_names[i]))
				__clk_reparent(orphan, clk);
			continue;
		}

		for (i = 0; i < orphan->num_parents; i++)
			if (!strcmp(clk->name, orphan->parent_names[i])) {
				__clk_reparent(orphan, clk);
				break;
			}
	}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 */
	if (clk->ops->init)
		clk->ops->init(clk->hw);

	clk_debug_register(clk);

out:
	mutex_unlock(&prepare_lock);

	return ret;
}

/**
 * __clk_register - register a clock and return a cookie.
 *
 * Same as clk_register, except that the .clk field inside hw shall point to a
 * preallocated (generally statically allocated) struct clk.  None of the
 * fields of the struct clk need to be initialized.
 *
 * The data pointed to by .init and .clk field shall NOT be marked as init
 * data.
 *
 * __clk_register is only exposed via clk-private.h and is intended for use
 * with very large numbers of clocks that need to be statically initialized.
 * It is a layering violation to include clk-private.h from any code which
 * implements a clock's .ops; as such any statically initialized clock data
 * MUST be in a separate C file from the logic that implements its operations.
 * Returns 0 on success, otherwise an error code.
 */
struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
{
	int ret;
	struct clk *clk;

	clk = hw->clk;
	clk->name = hw->init->name;
	clk->ops = hw->init->ops;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->parent_names = hw->init->parent_names;
	clk->num_parents = hw->init->num_parents;

	ret = __clk_init(dev, clk);
	if (ret)
		return ERR_PTR(ret);

	return clk;
}
EXPORT_SYMBOL_GPL(__clk_register);

static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
{
	int i, ret;

	clk->name = kstrdup(hw->init->name, GFP_KERNEL);
	if (!clk->name) {
		pr_err("%s: could not allocate clk->name\n", __func__);
		ret = -ENOMEM;
		goto fail_name;
	}
	clk->ops = hw->init->ops;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->num_parents = hw->init->num_parents;
	hw->clk = clk;

	/* allocate local copy in case parent_names is __initdata */
	clk->parent_names = kzalloc((sizeof(char *) * clk->num_parents),
			GFP_KERNEL);

	if (!clk->parent_names) {
		pr_err("%s: could not allocate clk->parent_names\n", __func__);
		ret = -ENOMEM;
		goto fail_parent_names;
	}

	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < clk->num_parents; i++) {
		clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
				GFP_KERNEL);
		if (!clk->parent_names[i]) {
			pr_err("%s: could not copy parent_names\n", __func__);
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	ret = __clk_init(dev, clk);
	if (!ret)
		return 0;

fail_parent_names_copy:
	while (--i >= 0)
		kfree(clk->parent_names[i]);
	kfree(clk->parent_names);
fail_parent_names:
	kfree(clk->name);
fail_name:
	return ret;
}

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with
 * new clock nodes.  It returns a pointer to the newly allocated struct clk
 * which cannot be dereferenced by driver code but may be used in conjunction
 * with the rest of the clock API.  In the event of an error clk_register
 * will return an error code; drivers must test for an error code after
 * calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	int ret;
	struct clk *clk;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk) {
		pr_err("%s: could not allocate clk\n", __func__);
		ret = -ENOMEM;
		goto fail_out;
	}

	ret = _clk_register(dev, hw, clk);
	if (!ret)
		return clk;

	kfree(clk);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
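
/*
 * Illustrative sketch: a provider registers a clock by embedding a
 * struct clk_hw and pointing its .init at a clk_init_data.  All names
 * and ops below are hypothetical:
 *
 *	static const char *foo_parents[] = { "osc" };
 *
 *	static struct clk_init_data foo_init = {
 *		.name		= "foo",
 *		.ops		= &foo_ops,
 *		.parent_names	= foo_parents,
 *		.num_parents	= ARRAY_SIZE(foo_parents),
 *	};
 *
 *	foo_hw.init = &foo_init;
 *	clk = clk_register(dev, &foo_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */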

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 *
 * Currently unimplemented.
 */
void clk_unregister(struct clk *clk) {}
EXPORT_SYMBOL_GPL(clk_unregister);

static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(res);
}

/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register().  Clocks returned from this function are
 * automatically clk_unregister()ed on driver detach.  See clk_register() for
 * more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;
	int ret;

	clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	ret = _clk_register(dev, hw, clk);
	if (!ret) {
		devres_add(dev, clk);
	} else {
		devres_free(clk);
		clk = ERR_PTR(ret);
	}

	return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);
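
/*
 * Illustrative sketch: in a driver probe path the devres variant removes
 * the need for explicit unregistration on the error and detach paths
 * ("foo_hw" is hypothetical):
 *
 *	clk = devm_clk_register(&pdev->dev, &foo_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	// no matching unregister call needed in .remove
 */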
1685
1686static int devm_clk_match(struct device *dev, void *res, void *data)
1687{
1688 struct clk *c = res;
1689 if (WARN_ON(!c))
1690 return 0;
1691 return c == data;
1692}
1693
1694/**
1695 * devm_clk_unregister - resource managed clk_unregister()
1696 * @clk: clock to unregister
1697 *
1698 * Deallocate a clock allocated with devm_clk_register(). Normally
1699 * this function will not need to be called and the resource management
1700 * code will ensure that the resource is freed.
1701 */
1702void devm_clk_unregister(struct device *dev, struct clk *clk)
1703{
1704 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
1705}
1706EXPORT_SYMBOL_GPL(devm_clk_unregister);
1707
Mike Turquetteb24764902012-03-15 23:11:19 -07001708/*** clk rate change notifiers ***/
1709
/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes. This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon. The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * doing so would attempt to acquire the prepare_lock mutex a second
 * time and deadlock.
 *
 * Pre-change notifier callbacks will be passed the current, pre-change
 * rate of the clk via struct clk_notifier_data.old_rate. The new,
 * post-change rate of the clk is passed via struct
 * clk_notifier_data.new_rate.
 *
 * Post-change notifiers will pass the now-current, post-change rate of
 * the clk in both struct clk_notifier_data.old_rate and struct
 * clk_notifier_data.new_rate.
 *
 * Abort-change notifiers are effectively the opposite of pre-change
 * notifiers: the original pre-change clk rate is passed in via struct
 * clk_notifier_data.new_rate and the failed post-change rate is passed
 * in via struct clk_notifier_data.old_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			goto found;

	/*
	 * clk wasn't in the notifier list; allocate a new clk_notifier.
	 * Note that testing cn->clk after the loop completes would
	 * dereference the list head's container, which is not a valid
	 * clk_notifier, hence the goto above.
	 */
	cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
	if (!cn)
		goto out;

	cn->clk = clk;
	srcu_init_notifier_head(&cn->notifier_head);

	list_add(&cn->node, &clk_notifier_list);

found:
	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->notifier_count++;

out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
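
/*
 * Example: a minimal sketch of a rate change notifier callback. The names
 * "foo_clk_cb" and "foo_nb" are hypothetical; the event values and struct
 * clk_notifier_data come from the clk notifier API documented above:
 *
 *	static int foo_clk_cb(struct notifier_block *nb, unsigned long event,
 *			      void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		switch (event) {
 *		case PRE_RATE_CHANGE:
 *			// ndata->old_rate is current, ndata->new_rate proposed
 *			return NOTIFY_OK;
 *		case POST_RATE_CHANGE:
 *			// both old_rate and new_rate hold the new rate
 *			return NOTIFY_OK;
 *		case ABORT_RATE_CHANGE:
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 *
 *	static struct notifier_block foo_nb = { .notifier_call = foo_clk_cb };
 *
 *	ret = clk_notifier_register(clk, &foo_nb);
 */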

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Requests no further notification for changes to 'clk' and frees the
 * memory allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL, *iter;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	/*
	 * Search via a separate iterator so that cn stays NULL when no
	 * match is found; the loop cursor itself is not a valid entry
	 * after the loop terminates without a break.
	 */
	list_for_each_entry(iter, &clk_notifier_list, node) {
		if (iter->clk == clk) {
			cn = iter;
			break;
		}
	}

	if (cn) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			kfree(cn);
		}

	} else {
		ret = -ENOENT;
	}

	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
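
/*
 * Example: pairing the registration sketched above with cleanup on a
 * driver's remove or error path (names hypothetical):
 *
 *	clk_notifier_unregister(clk, &foo_nb);
 */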

#ifdef CONFIG_OF
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback. Returns NULL or a struct clk for the
 *       given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	void *data;
};

extern struct of_device_id __clk_of_table[];

static const struct of_device_id __clk_of_table_sentinel
	__used __section(__clk_of_table_end);

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_lock);

struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
				  void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
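
/*
 * Example: a minimal sketch of exposing a single clock to the device tree
 * via of_clk_src_simple_get(). The clk and device node come from a
 * hypothetical provider's init code:
 *
 *	clk = clk_register(NULL, &foo->hw);
 *	if (!IS_ERR(clk))
 *		of_clk_add_provider(np, of_clk_src_simple_get, clk);
 */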

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
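
/*
 * Example: a minimal sketch of a multi-output provider built on
 * of_clk_src_onecell_get(). The array size FOO_NR_CLKS and the node are
 * hypothetical:
 *
 *	static struct clk *foo_clks[FOO_NR_CLKS];
 *	static struct clk_onecell_data foo_clk_data = {
 *		.clks = foo_clks,
 *		.clk_num = ARRAY_SIZE(foo_clks),
 *	};
 *
 *	// fill foo_clks[] via clk_register(), then:
 *	of_clk_add_provider(np, of_clk_src_onecell_get, &foo_clk_data);
 */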

/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;

	cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_lock);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_lock);
	pr_debug("Added clock from %s\n", np->full_name);

	return 0;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);
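
/*
 * Example: a sketch of the device tree side of a provider registered with
 * of_clk_add_provider(). The node names and consumer are illustrative only:
 *
 *	clks: clock-controller@1000 {
 *		compatible = "foo,clock-controller";
 *		#clock-cells = <1>;
 *	};
 *
 *	uart@2000 {
 *		clocks = <&clks 3>;
 *	};
 *
 * A one-cell specifier such as <&clks 3> is what of_clk_src_onecell_get()
 * decodes into clk_data->clks[3].
 */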

/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_lock);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_lock);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	struct of_clk_provider *provider;
	struct clk *clk = ERR_PTR(-ENOENT);

	/* Check if we have such a provider in our list */
	mutex_lock(&of_clk_lock);
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np)
			clk = provider->get(clkspec, provider->data);
		if (!IS_ERR(clk))
			break;
	}
	mutex_unlock(&of_clk_lock);

	return clk;
}

const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	const char *clk_name;
	int rc;

	if (index < 0)
		return NULL;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  clkspec.args_count ? clkspec.args[0] : 0,
					  &clk_name) < 0)
		clk_name = clkspec.np->name;

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
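
/*
 * Example: with an illustrative provider node like the following, a call to
 * of_clk_get_parent_name(np, 0) on a consumer np whose clocks property is
 * <&clks 1> would return "bar"; without clock-output-names it falls back to
 * the provider node's name:
 *
 *	clks: clock-controller@1000 {
 *		#clock-cells = <1>;
 *		clock-output-names = "foo", "bar";
 *	};
 */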

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers and
 * calls their initialization functions. If @matches is NULL, the built-in
 * __clk_of_table is used.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	struct device_node *np;

	if (!matches)
		matches = __clk_of_table;

	for_each_matching_node(np, matches) {
		const struct of_device_id *match = of_match_node(matches, np);
		of_clk_init_cb_t clk_init_cb = match->data;

		clk_init_cb(np);
	}
}
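
/*
 * Example: a sketch of how a provider typically lands in __clk_of_table,
 * using the CLK_OF_DECLARE() helper from <linux/clk-provider.h>. The
 * compatible string and init function are hypothetical:
 *
 *	static void __init foo_clocks_init(struct device_node *np)
 *	{
 *		// register clks, then of_clk_add_provider(np, ...)
 *	}
 *	CLK_OF_DECLARE(foo, "foo,clock-controller", foo_clocks_init);
 *
 * Platform code then calls of_clk_init(NULL) early in boot to run every
 * matching init function.
 */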
#endif