/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/sched.h>

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/*** locking ***/
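/*
 * prepare_lock (a mutex) protects the prepare/unprepare path and the clk
 * topology; enable_lock (a spinlock taken with IRQs disabled) protects
 * enable/disable.  Both are reentrant for the task that already owns them:
 * the helpers below only bump the per-owner refcount in that case, so
 * clk_ops callbacks may safely call back into the clk API.
 */
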
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt)
		return;
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}

/*** debugfs support ***/

#ifdef CONFIG_COMMON_CLK_DEBUG
#include <linux/debugfs.h>

static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;

static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
{
	if (!c)
		return;

	seq_printf(s, "%*s%-*s %-11d %-12d %-10lu",
		   level * 3 + 1, "",
		   30 - level * 3, c->name,
		   c->enable_count, c->prepare_count, clk_get_rate(c));
	seq_printf(s, "\n");
}

static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
				     int level)
{
	struct clk *child;

	if (!c)
		return;

	clk_summary_show_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node)
		clk_summary_show_subtree(s, child, level + 1);
}

static int clk_summary_show(struct seq_file *s, void *data)
{
	struct clk *c;

	seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
	seq_printf(s, "---------------------------------------------------------------------\n");

	clk_prepare_lock();

	hlist_for_each_entry(c, &clk_root_list, child_node)
		clk_summary_show_subtree(s, c, 0);

	hlist_for_each_entry(c, &clk_orphan_list, child_node)
		clk_summary_show_subtree(s, c, 0);

	clk_prepare_unlock();

	return 0;
}


static int clk_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_summary_show, inode->i_private);
}

static const struct file_operations clk_summary_fops = {
	.open = clk_summary_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
{
	if (!c)
		return;

	seq_printf(s, "\"%s\": { ", c->name);
	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
	seq_printf(s, "\"rate\": %lu", clk_get_rate(c));
}

static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
{
	struct clk *child;

	if (!c)
		return;

	clk_dump_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node) {
		seq_printf(s, ",");
		clk_dump_subtree(s, child, level + 1);
	}

	seq_printf(s, "}");
}

static int clk_dump(struct seq_file *s, void *data)
{
	struct clk *c;
	bool first_node = true;

	seq_printf(s, "{");

	clk_prepare_lock();

	hlist_for_each_entry(c, &clk_root_list, child_node) {
		if (!first_node)
			seq_printf(s, ",");
		first_node = false;
		clk_dump_subtree(s, c, 0);
	}

	hlist_for_each_entry(c, &clk_orphan_list, child_node) {
		seq_printf(s, ",");
		clk_dump_subtree(s, c, 0);
	}

	clk_prepare_unlock();

	seq_printf(s, "}");
	return 0;
}


static int clk_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dump, inode->i_private);
}

static const struct file_operations clk_dump_fops = {
	.open = clk_dump_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
	struct dentry *d;
	int ret = -ENOMEM;

	if (!clk || !pdentry) {
		ret = -EINVAL;
		goto out;
	}

	d = debugfs_create_dir(clk->name, pdentry);
	if (!d)
		goto out;

	clk->dentry = d;

	d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
			(u32 *)&clk->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
			(u32 *)&clk->flags);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->prepare_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->enable_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->notifier_count);
	if (!d)
		goto err_out;

	ret = 0;
	goto out;

err_out:
	debugfs_remove_recursive(clk->dentry);
	clk->dentry = NULL;
out:
	return ret;
}

/* caller must hold prepare_lock */
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
	struct clk *child;
	int ret = -EINVAL;

	if (!clk || !pdentry)
		goto out;

	ret = clk_debug_create_one(clk, pdentry);

	if (ret)
		goto out;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_debug_create_subtree(child, clk->dentry);

	ret = 0;
out:
	return ret;
}

/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.  Only clk_init calls this function (so
 * far), so this requirement is taken care of.
 */
static int clk_debug_register(struct clk *clk)
{
	struct clk *parent;
	struct dentry *pdentry;
	int ret = 0;

	if (!inited)
		goto out;

	parent = clk->parent;

	/*
	 * Check to see if a clk is a root clk.  Also check that it is
	 * safe to add this clk to debugfs
	 */
	if (!parent)
		if (clk->flags & CLK_IS_ROOT)
			pdentry = rootdir;
		else
			pdentry = orphandir;
	else
		if (parent->dentry)
			pdentry = parent->dentry;
		else
			goto out;

	ret = clk_debug_create_subtree(clk, pdentry);

out:
	return ret;
}

/**
 * clk_debug_reparent - reparent clk node in the debugfs clk tree
 * @clk: the clk being reparented
 * @new_parent: the new clk parent, may be NULL
 *
 * Rename clk entry in the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.
 */
static void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
{
	struct dentry *d;
	struct dentry *new_parent_d;

	if (!inited)
		return;

	if (new_parent)
		new_parent_d = new_parent->dentry;
	else
		new_parent_d = orphandir;

	d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
			new_parent_d, clk->name);
	if (d)
		clk->dentry = d;
	else
		pr_debug("%s: failed to rename debugfs entry for %s\n",
				__func__, clk->name);
}

/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is setup.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
387 * represented in the debugfs clk tree. This function should only be
388 * called once at boot-time, and all other clks added dynamically will
389 * be done so with clk_debug_register.
390 */
391static int __init clk_debug_init(void)
392{
393 struct clk *clk;
Prashant Gaikwad1af599d2012-12-26 19:16:22 +0530394 struct dentry *d;
Mike Turquetteb24764902012-03-15 23:11:19 -0700395
396 rootdir = debugfs_create_dir("clk", NULL);
397
398 if (!rootdir)
399 return -ENOMEM;
400
Prashant Gaikwad1af599d2012-12-26 19:16:22 +0530401 d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
402 &clk_summary_fops);
403 if (!d)
404 return -ENOMEM;
405
Prashant Gaikwadbddca892012-12-26 19:16:23 +0530406 d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, NULL,
407 &clk_dump_fops);
408 if (!d)
409 return -ENOMEM;
410
Mike Turquetteb24764902012-03-15 23:11:19 -0700411 orphandir = debugfs_create_dir("orphans", rootdir);
412
413 if (!orphandir)
414 return -ENOMEM;
415
Mike Turquetteeab89f62013-03-28 13:59:01 -0700416 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -0700417
Sasha Levinb67bfe02013-02-27 17:06:00 -0800418 hlist_for_each_entry(clk, &clk_root_list, child_node)
Mike Turquetteb24764902012-03-15 23:11:19 -0700419 clk_debug_create_subtree(clk, rootdir);
420
Sasha Levinb67bfe02013-02-27 17:06:00 -0800421 hlist_for_each_entry(clk, &clk_orphan_list, child_node)
Mike Turquetteb24764902012-03-15 23:11:19 -0700422 clk_debug_create_subtree(clk, orphandir);
423
424 inited = 1;
425
Mike Turquetteeab89f62013-03-28 13:59:01 -0700426 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -0700427
428 return 0;
429}
430late_initcall(clk_debug_init);
431#else
432static inline int clk_debug_register(struct clk *clk) { return 0; }
Ulf Hanssonb33d2122013-04-02 23:09:37 +0200433static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
434{
435}
Mike Turquette70d347e2012-03-26 11:53:47 -0700436#endif
Mike Turquetteb24764902012-03-15 23:11:19 -0700437
Mike Turquetteb24764902012-03-15 23:11:19 -0700438/* caller must hold prepare_lock */
Ulf Hansson1c155b32013-03-12 20:26:03 +0100439static void clk_unprepare_unused_subtree(struct clk *clk)
440{
441 struct clk *child;
442
443 if (!clk)
444 return;
445
446 hlist_for_each_entry(child, &clk->children, child_node)
447 clk_unprepare_unused_subtree(child);
448
449 if (clk->prepare_count)
450 return;
451
452 if (clk->flags & CLK_IGNORE_UNUSED)
453 return;
454
Ulf Hansson3cc82472013-03-12 20:26:04 +0100455 if (__clk_is_prepared(clk)) {
456 if (clk->ops->unprepare_unused)
457 clk->ops->unprepare_unused(clk->hw);
458 else if (clk->ops->unprepare)
Ulf Hansson1c155b32013-03-12 20:26:03 +0100459 clk->ops->unprepare(clk->hw);
Ulf Hansson3cc82472013-03-12 20:26:04 +0100460 }
Ulf Hansson1c155b32013-03-12 20:26:03 +0100461}
462
463/* caller must hold prepare_lock */
Mike Turquetteb24764902012-03-15 23:11:19 -0700464static void clk_disable_unused_subtree(struct clk *clk)
465{
466 struct clk *child;
Mike Turquetteb24764902012-03-15 23:11:19 -0700467 unsigned long flags;
468
469 if (!clk)
470 goto out;
471
Sasha Levinb67bfe02013-02-27 17:06:00 -0800472 hlist_for_each_entry(child, &clk->children, child_node)
Mike Turquetteb24764902012-03-15 23:11:19 -0700473 clk_disable_unused_subtree(child);
474
Mike Turquetteeab89f62013-03-28 13:59:01 -0700475 flags = clk_enable_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -0700476
477 if (clk->enable_count)
478 goto unlock_out;
479
480 if (clk->flags & CLK_IGNORE_UNUSED)
481 goto unlock_out;
482
Mike Turquette7c045a52012-12-04 11:00:35 -0800483 /*
484 * some gate clocks have special needs during the disable-unused
485 * sequence. call .disable_unused if available, otherwise fall
486 * back to .disable
487 */
488 if (__clk_is_enabled(clk)) {
489 if (clk->ops->disable_unused)
490 clk->ops->disable_unused(clk->hw);
491 else if (clk->ops->disable)
492 clk->ops->disable(clk->hw);
493 }
Mike Turquetteb24764902012-03-15 23:11:19 -0700494
495unlock_out:
Mike Turquetteeab89f62013-03-28 13:59:01 -0700496 clk_enable_unlock(flags);
Mike Turquetteb24764902012-03-15 23:11:19 -0700497
498out:
499 return;
500}
501
Olof Johansson1e435252013-04-27 14:10:18 -0700502static bool clk_ignore_unused;
503static int __init clk_ignore_unused_setup(char *__unused)
504{
505 clk_ignore_unused = true;
506 return 1;
507}
508__setup("clk_ignore_unused", clk_ignore_unused_setup);
509
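/*
 * Boot option: passing "clk_ignore_unused" on the kernel command line sets
 * clk_ignore_unused above, which makes clk_disable_unused() below bail out
 * instead of gating and unpreparing clocks that were left running by the
 * bootloader but never claimed by a driver.
 */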
static int clk_disable_unused(void)
{
	struct clk *clk;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(clk);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

/*** helper functions ***/

const char *__clk_get_name(struct clk *clk)
{
	return !clk ? NULL : clk->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->hw;
}

u8 __clk_get_num_parents(struct clk *clk)
{
	return !clk ? 0 : clk->num_parents;
}

struct clk *__clk_get_parent(struct clk *clk)
{
	return !clk ? NULL : clk->parent;
}

struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
{
	if (!clk || index >= clk->num_parents)
		return NULL;
	else if (!clk->parents)
		return __clk_lookup(clk->parent_names[index]);
	else if (!clk->parents[index])
		return clk->parents[index] =
			__clk_lookup(clk->parent_names[index]);
	else
		return clk->parents[index];
}

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->enable_count;
}

unsigned int __clk_get_prepare_count(struct clk *clk)
{
	return !clk ? 0 : clk->prepare_count;
}

unsigned long __clk_get_rate(struct clk *clk)
{
	unsigned long ret;

	if (!clk) {
		ret = 0;
		goto out;
	}

	ret = clk->rate;

	if (clk->flags & CLK_IS_ROOT)
		goto out;

	if (!clk->parent)
		ret = 0;

out:
	return ret;
}

unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

bool __clk_is_prepared(struct clk *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_prepared is optional for clocks that can prepare
	 * fall back to software usage counter if it is missing
	 */
	if (!clk->ops->is_prepared) {
		ret = clk->prepare_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_prepared(clk->hw);
out:
	return !!ret;
}

bool __clk_is_enabled(struct clk *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_enabled is only mandatory for clocks that gate
	 * fall back to software usage counter if .is_enabled is missing
	 */
	if (!clk->ops->is_enabled) {
		ret = clk->enable_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_enabled(clk->hw);
out:
	return !!ret;
}

static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
	struct clk *child;
	struct clk *ret;

	if (!strcmp(clk->name, name))
		return clk;

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

struct clk *__clk_lookup(const char *name)
{
	struct clk *root_clk;
	struct clk *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

/*
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 */
long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *best_parent_rate,
			      struct clk **best_parent_p)
{
	struct clk *clk = hw->clk, *parent, *best_parent = NULL;
	int i, num_parents;
	unsigned long parent_rate, best = 0;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = clk->parent;
		if (clk->flags & CLK_SET_RATE_PARENT)
			best = __clk_round_rate(parent, rate);
		else if (parent)
			best = __clk_get_rate(parent);
		else
			best = __clk_get_rate(clk);
		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = clk->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_get_parent_by_index(clk, i);
		if (!parent)
			continue;
		if (clk->flags & CLK_SET_RATE_PARENT)
			parent_rate = __clk_round_rate(parent, rate);
		else
			parent_rate = __clk_get_rate(parent);
		if (parent_rate <= rate && parent_rate > best) {
			best_parent = parent;
			best = parent_rate;
		}
	}

out:
	if (best_parent)
		*best_parent_p = best_parent;
	*best_parent_rate = best;

	return best;
}
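
/*
 * Illustrative only: a mux-style clock provider can plug this helper
 * straight into its clk_ops as the .determine_rate callback; the generic
 * mux in clk-mux.c uses it this way.  The my_mux_* names below are
 * hypothetical:
 *
 *	static const struct clk_ops my_mux_ops = {
 *		.get_parent	= my_mux_get_parent,
 *		.set_parent	= my_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 */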

/*** clk api ***/

void __clk_unprepare(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->prepare_count == 0))
		return;

	if (--clk->prepare_count > 0)
		return;

	WARN_ON(clk->enable_count > 0);

	if (clk->ops->unprepare)
		clk->ops->unprepare(clk->hw);

	__clk_unprepare(clk->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a slow
 * part.  It is this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	clk_prepare_lock();
	__clk_unprepare(clk);
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);

int __clk_prepare(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (clk->prepare_count == 0) {
		ret = __clk_prepare(clk->parent);
		if (ret)
			return ret;

		if (clk->ops->prepare) {
			ret = clk->ops->prepare(clk->hw);
			if (ret) {
				__clk_unprepare(clk->parent);
				return ret;
			}
		}
	}

	clk->prepare_count++;

	return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2C.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is this reason that clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	clk_prepare_lock();
	ret = __clk_prepare(clk);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);
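
/*
 * Illustrative consumer sequence (hypothetical clock name, not part of this
 * file): prepare in sleepable context before enabling, and tear down in the
 * reverse order:
 *
 *	struct clk *c = clk_get(dev, "uart");	(name is hypothetical)
 *
 *	ret = clk_prepare(c);		may sleep, task context only
 *	if (!ret)
 *		ret = clk_enable(c);	fast, safe in atomic context
 *	...
 *	clk_disable(c);			must precede clk_unprepare()
 *	clk_unprepare(c);
 */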

static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(IS_ERR(clk)))
		return;

	if (WARN_ON(clk->enable_count == 0))
		return;

	if (--clk->enable_count > 0)
		return;

	if (clk->ops->disable)
		clk->ops->disable(clk->hw);

	__clk_disable(clk->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It is
 * this reason that clk_unprepare and clk_disable are not mutually exclusive.
 * In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	flags = clk_enable_lock();
	__clk_disable(clk);
	clk_enable_unlock(flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (WARN_ON(clk->prepare_count == 0))
		return -ESHUTDOWN;

	if (clk->enable_count == 0) {
		ret = __clk_enable(clk->parent);

		if (ret)
			return ret;

		if (clk->ops->enable) {
			ret = clk->ops->enable(clk->hw);
			if (ret) {
				__clk_disable(clk->parent);
				return ret;
			}
		}
	}

	clk->enable_count++;
	return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = __clk_enable(clk);
	clk_enable_unlock(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long parent_rate = 0;
	struct clk *parent;

	if (!clk)
		return 0;

	parent = clk->parent;
	if (parent)
		parent_rate = parent->rate;

	if (clk->ops->determine_rate)
		return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
						&parent);
	else if (clk->ops->round_rate)
		return clk->ops->round_rate(clk->hw, rate, &parent_rate);
	else if (clk->flags & CLK_SET_RATE_PARENT)
		return __clk_round_rate(clk->parent, rate);
	else
		return clk->rate;
}

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use which is then returned.  If clk doesn't support round_rate operation
 * then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long ret;

	clk_prepare_lock();
	ret = __clk_round_rate(clk, rate);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
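
/*
 * Illustrative consumer usage (the rate is hypothetical): query what the
 * framework would actually deliver before committing to it:
 *
 *	long rounded = clk_round_rate(clk, 74250000);
 *	if (rounded > 0)
 *		ret = clk_set_rate(clk, rounded);
 */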

/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk *clk, unsigned long msg,
		unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.clk = clk;
	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk == clk) {
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
			break;
		}
	}

	return ret;
}

/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk *child;

	old_rate = clk->rate;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		clk->rate = parent_rate;

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (clk->notifier_count && msg)
		__clk_notify(clk, msg, old_rate, clk->rate);

	hlist_for_each_entry(child, &clk->children, child_node)
		__clk_recalc_rates(child, msg);
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	clk_prepare_lock();

	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(clk, 0);

	rate = __clk_get_rate(clk);
	clk_prepare_unlock();

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
{
	int i;

	if (!clk->parents) {
		clk->parents = kcalloc(clk->num_parents,
				sizeof(struct clk *), GFP_KERNEL);
		if (!clk->parents)
			return -ENOMEM;
	}

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to __clk_lookup.
	 */
	for (i = 0; i < clk->num_parents; i++) {
		if (clk->parents[i] == parent)
			return i;

		if (clk->parents[i])
			continue;

		if (!strcmp(clk->parent_names[i], parent->name)) {
			clk->parents[i] = __clk_lookup(parent->name);
			return i;
		}
	}

	return -EINVAL;
}

static void clk_reparent(struct clk *clk, struct clk *new_parent)
{
	hlist_del(&clk->child_node);

	if (new_parent) {
		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == clk)
			new_parent->new_child = NULL;

		hlist_add_head(&clk->child_node, &new_parent->children);
	} else {
		hlist_add_head(&clk->child_node, &clk_orphan_list);
	}

	clk->parent = new_parent;
}

static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk *old_parent = clk->parent;

	/*
	 * Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on.  This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */
	if (clk->prepare_count) {
		__clk_prepare(parent);
		clk_enable(parent);
		clk_enable(clk);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(clk, parent);
	clk_enable_unlock(flags);

	/* change clock input source */
	if (parent && clk->ops->set_parent)
		ret = clk->ops->set_parent(clk->hw, p_index);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(clk, old_parent);
		clk_enable_unlock(flags);

		if (clk->prepare_count) {
			clk_disable(clk);
			clk_disable(parent);
			__clk_unprepare(parent);
		}
		return ret;
	}

	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (clk->prepare_count) {
		clk_disable(clk);
		clk_disable(old_parent);
		__clk_unprepare(old_parent);
	}

	/* update debugfs with new clk tree topology */
	clk_debug_reparent(clk, parent);
	return 0;
}

/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
	struct clk *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	if (clk->ops->recalc_rate)
		new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		new_rate = parent_rate;

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (clk->notifier_count)
		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK)
		goto out;

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
			     struct clk *new_parent, u8 p_index)
{
	struct clk *child;

	clk->new_rate = new_rate;
	clk->new_parent = new_parent;
	clk->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	clk->new_child = NULL;
	if (new_parent && new_parent != clk->parent)
		new_parent->new_child = clk;

	hlist_for_each_entry(child, &clk->children, child_node) {
		if (child->ops->recalc_rate)
			child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
		else
			child->new_rate = new_rate;
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
	struct clk *top = clk;
	struct clk *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	int p_index = 0;

	/* sanity */
	if (IS_ERR_OR_NULL(clk))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = clk->parent;
	if (parent)
		best_parent_rate = parent->rate;

	/* find the closest rate and parent clk/rate */
	if (clk->ops->determine_rate) {
		new_rate = clk->ops->determine_rate(clk->hw, rate,
						    &best_parent_rate,
						    &parent);
	} else if (clk->ops->round_rate) {
		new_rate = clk->ops->round_rate(clk->hw, rate,
						&best_parent_rate);
	} else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		clk->new_rate = clk->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, clk->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(clk, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, clk->name);
			return NULL;
		}
	}

	if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(clk, new_rate, parent, p_index);

	return top;
}

/*
 * Notify about rate changes in a subtree.  Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
	struct clk *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (clk->rate == clk->new_rate)
		return NULL;

	if (clk->notifier_count) {
		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = clk;
	}

	hlist_for_each_entry(child, &clk->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != clk)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in clk->children yet */
	if (clk->new_child) {
		tmp_clk = clk_propagate_rate_change(clk->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
	struct clk *child;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;

	old_rate = clk->rate;

	/* set parent */
	if (clk->new_parent && clk->new_parent != clk->parent)
		__clk_set_parent(clk, clk->new_parent, clk->new_parent_index);

	if (clk->parent)
		best_parent_rate = clk->parent->rate;

	if (clk->ops->set_rate)
		clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
	else
		clk->rate = best_parent_rate;

	if (clk->notifier_count && old_rate != clk->rate)
		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

	hlist_for_each_entry(child, &clk->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != clk)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in clk->children yet */
	if (clk->new_child)
		clk_change_rate(clk->new_child);
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk *top, *fail_clk;
	int ret = 0;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	/* bail early if nothing to do */
	if (rate == clk_get_rate(clk))
		goto out;

	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(clk, rate);
	if (!top) {
		ret = -EINVAL;
		goto out;
	}

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_warn("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto out;
	}

	/* change the rates */
	clk_change_rate(top);

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
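
/*
 * Summary of the sequence implemented above: clk_calc_new_rates() picks the
 * topmost clock whose rate must change, clk_propagate_rate_change() sends
 * PRE_RATE_CHANGE notifications (and ABORT_RATE_CHANGE if a listener
 * objects), and clk_change_rate() walks back down the subtree calling
 * .set_rate/.recalc_rate and firing POST_RATE_CHANGE notifiers.
 */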

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	clk_prepare_lock();
	parent = __clk_get_parent(clk);
	clk_prepare_unlock();

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
 */
static struct clk *__clk_init_parent(struct clk *clk)
{
	struct clk *ret = NULL;
	u8 index;

	/* handle the trivial cases */

	if (!clk->num_parents)
		goto out;

	if (clk->num_parents == 1) {
		if (IS_ERR_OR_NULL(clk->parent))
			ret = clk->parent = __clk_lookup(clk->parent_names[0]);
		ret = clk->parent;
		goto out;
	}

	if (!clk->ops->get_parent) {
		WARN(!clk->ops->get_parent,
			"%s: multi-parent clocks must implement .get_parent\n",
			__func__);
		goto out;
	}

	/*
	 * Do our best to cache parent clocks in clk->parents.  This prevents
	 * unnecessary and expensive calls to __clk_lookup.  We don't set
	 * clk->parent here; that is done by the calling function
	 */

	index = clk->ops->get_parent(clk->hw);

	if (!clk->parents)
		clk->parents =
			kcalloc(clk->num_parents, sizeof(struct clk *),
					GFP_KERNEL);

	ret = clk_get_parent_by_index(clk, index);

out:
	return ret;
}

void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
	clk_reparent(clk, new_parent);
	clk_debug_reparent(clk, new_parent);
	__clk_recalc_rates(clk, POST_RATE_CHANGE);
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk is in
 * prepared state, the clk will get enabled for the duration of this call.  If
 * that's not acceptable for a specific clk (Eg: the consumer can't handle
 * that, the reparenting is glitchy in hardware, etc), use the
 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
 *
 * After successfully changing clk's parent clk_set_parent will update the
 * clk topology, sysfs topology and propagate rate recalculation via
 * __clk_recalc_rates.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	if (!clk)
		return 0;

	if (!clk->ops)
		return -EINVAL;

Ulf Hansson031dcc92013-04-02 23:09:38 +02001587 /* verify ops for multi-parent clks */
1588 if ((clk->num_parents > 1) && (!clk->ops->set_parent))
Mike Turquetteb24764902012-03-15 23:11:19 -07001589 return -ENOSYS;
1590
1591 /* prevent racing with updates to the clock topology */
Mike Turquetteeab89f62013-03-28 13:59:01 -07001592 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001593
1594 if (clk->parent == parent)
1595 goto out;
1596
Ulf Hansson031dcc92013-04-02 23:09:38 +02001597 /* check that we are allowed to re-parent if the clock is in use */
1598 if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
1599 ret = -EBUSY;
1600 goto out;
1601 }
1602
1603 /* try finding the new parent index */
1604 if (parent) {
1605 p_index = clk_fetch_parent_index(clk, parent);
1606 p_rate = parent->rate;
Tomasz Figaf1c8b2e2013-09-29 02:37:14 +02001607 if (p_index < 0) {
Ulf Hansson031dcc92013-04-02 23:09:38 +02001608 pr_debug("%s: clk %s can not be parent of clk %s\n",
1609 __func__, parent->name, clk->name);
Tomasz Figaf1c8b2e2013-09-29 02:37:14 +02001610 ret = p_index;
Ulf Hansson031dcc92013-04-02 23:09:38 +02001611 goto out;
1612 }
1613 }
1614
Mike Turquetteb24764902012-03-15 23:11:19 -07001615 /* propagate PRE_RATE_CHANGE notifications */
Soren Brinkmannf3aab5d2013-04-16 10:06:50 -07001616 ret = __clk_speculate_rates(clk, p_rate);
Mike Turquetteb24764902012-03-15 23:11:19 -07001617
1618 /* abort if a driver objects */
Soren Brinkmannfb72a052013-04-03 12:17:12 -07001619 if (ret & NOTIFY_STOP_MASK)
Mike Turquetteb24764902012-03-15 23:11:19 -07001620 goto out;
1621
Ulf Hansson031dcc92013-04-02 23:09:38 +02001622 /* do the re-parent */
1623 ret = __clk_set_parent(clk, parent, p_index);
Mike Turquetteb24764902012-03-15 23:11:19 -07001624
Ulf Hanssona68de8e2013-04-02 23:09:39 +02001625 /* propagate rate recalculation accordingly */
1626 if (ret)
Mike Turquetteb24764902012-03-15 23:11:19 -07001627 __clk_recalc_rates(clk, ABORT_RATE_CHANGE);
Ulf Hanssona68de8e2013-04-02 23:09:39 +02001628 else
1629 __clk_recalc_rates(clk, POST_RATE_CHANGE);
Mike Turquetteb24764902012-03-15 23:11:19 -07001630
1631out:
Mike Turquetteeab89f62013-03-28 13:59:01 -07001632 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001633
1634 return ret;
1635}
1636EXPORT_SYMBOL_GPL(clk_set_parent);
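/*
 * Illustrative sketch (assumption, not part of the original file): a
 * hypothetical consumer re-parenting a mux clock as described above.  The
 * clock names "uart_mux" and "pll2" are invented for the example.
 */
static int example_switch_uart_source(struct device *dev)
{
        struct clk *mux, *new_parent;
        int ret;

        mux = devm_clk_get(dev, "uart_mux");
        new_parent = devm_clk_get(dev, "pll2");
        if (IS_ERR(mux) || IS_ERR(new_parent))
                return -ENODEV;

        /*
         * For a CLK_SET_PARENT_GATE clock this must happen while the
         * clock is unprepared, otherwise -EBUSY is returned.
         */
        ret = clk_set_parent(mux, new_parent);
        if (ret)
                dev_err(dev, "failed to re-parent uart_mux: %d\n", ret);

        return ret;
}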
1637
1638/**
1639 * __clk_init - initialize the data structures in a struct clk
1640 * @dev: device initializing this clk, placeholder for now
1641 * @clk: clk being initialized
1642 *
1643 * Initializes the lists in struct clk, queries the hardware for the
1644 * parent and rate and sets them both.
Mike Turquetteb24764902012-03-15 23:11:19 -07001645 */
Mike Turquetted1302a32012-03-29 14:30:40 -07001646int __clk_init(struct device *dev, struct clk *clk)
Mike Turquetteb24764902012-03-15 23:11:19 -07001647{
Mike Turquetted1302a32012-03-29 14:30:40 -07001648 int i, ret = 0;
Mike Turquetteb24764902012-03-15 23:11:19 -07001649 struct clk *orphan;
Sasha Levinb67bfe02013-02-27 17:06:00 -08001650 struct hlist_node *tmp2;
Mike Turquetteb24764902012-03-15 23:11:19 -07001651
1652 if (!clk)
Mike Turquetted1302a32012-03-29 14:30:40 -07001653 return -EINVAL;
Mike Turquetteb24764902012-03-15 23:11:19 -07001654
Mike Turquetteeab89f62013-03-28 13:59:01 -07001655 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001656
1657 /* check to see if a clock with this name is already registered */
Mike Turquetted1302a32012-03-29 14:30:40 -07001658 if (__clk_lookup(clk->name)) {
1659 pr_debug("%s: clk %s already initialized\n",
1660 __func__, clk->name);
1661 ret = -EEXIST;
Mike Turquetteb24764902012-03-15 23:11:19 -07001662 goto out;
Mike Turquetted1302a32012-03-29 14:30:40 -07001663 }
Mike Turquetteb24764902012-03-15 23:11:19 -07001664
Mike Turquetted4d7e3d2012-03-26 16:15:52 -07001665 /* check that clk_ops are sane. See Documentation/clk.txt */
1666 if (clk->ops->set_rate &&
James Hogan71472c02013-07-29 12:25:00 +01001667 !((clk->ops->round_rate || clk->ops->determine_rate) &&
1668 clk->ops->recalc_rate)) {
1669 pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
Mike Turquetted4d7e3d2012-03-26 16:15:52 -07001670 __func__, clk->name);
Mike Turquetted1302a32012-03-29 14:30:40 -07001671 ret = -EINVAL;
Mike Turquetted4d7e3d2012-03-26 16:15:52 -07001672 goto out;
1673 }
1674
1675 if (clk->ops->set_parent && !clk->ops->get_parent) {
1676 pr_warning("%s: %s must implement .get_parent & .set_parent\n",
1677 __func__, clk->name);
Mike Turquetted1302a32012-03-29 14:30:40 -07001678 ret = -EINVAL;
Mike Turquetted4d7e3d2012-03-26 16:15:52 -07001679 goto out;
1680 }
1681
Mike Turquetteb24764902012-03-15 23:11:19 -07001682 /* throw a WARN if any entries in parent_names are NULL */
1683 for (i = 0; i < clk->num_parents; i++)
1684 WARN(!clk->parent_names[i],
1685 "%s: invalid NULL in %s's .parent_names\n",
1686 __func__, clk->name);
1687
1688 /*
1689 * Allocate an array of struct clk *'s to avoid unnecessary string
1690 * look-ups of clk's possible parents. This can fail for clocks passed
1691 * in to clk_init during early boot; thus any access to clk->parents[]
1692 * must always check for a NULL pointer and try to populate it if
1693 * necessary.
1694 *
1695 * If clk->parents is not NULL we skip this entire block. This allows
1696 * for clock drivers to statically initialize clk->parents.
1697 */
Rajendra Nayak9ca1c5a2012-06-06 14:41:30 +05301698 if (clk->num_parents > 1 && !clk->parents) {
Tomasz Figa96a7ed92013-09-29 02:37:15 +02001699 clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
1700 GFP_KERNEL);
Mike Turquetteb24764902012-03-15 23:11:19 -07001701 /*
1702 * __clk_lookup returns NULL for parents that have not been
1703 * clk_init'd; thus any access to clk->parents[] must check
1704 * for a NULL pointer. We can always perform lazy lookups for
1705 * missing parents later on.
1706 */
1707 if (clk->parents)
1708 for (i = 0; i < clk->num_parents; i++)
1709 clk->parents[i] =
1710 __clk_lookup(clk->parent_names[i]);
1711 }
1712
1713 clk->parent = __clk_init_parent(clk);
1714
1715 /*
1716 * Populate clk->parent if parent has already been __clk_init'd. If
1717 * parent has not yet been __clk_init'd then place clk in the orphan
1718 * list. If clk has set the CLK_IS_ROOT flag then place it in the root
1719 * clk list.
1720 *
1721 * Every time a new clk is clk_init'd then we walk the list of orphan
1722 * clocks and re-parent any that are children of the clock currently
1723 * being clk_init'd.
1724 */
1725 if (clk->parent)
1726 hlist_add_head(&clk->child_node,
1727 &clk->parent->children);
1728 else if (clk->flags & CLK_IS_ROOT)
1729 hlist_add_head(&clk->child_node, &clk_root_list);
1730 else
1731 hlist_add_head(&clk->child_node, &clk_orphan_list);
1732
1733 /*
1734 * Set clk's rate. The preferred method is to use .recalc_rate. For
1735 * simple clocks and lazy developers the default fallback is to use the
1736 * parent's rate. If a clock doesn't have a parent (or is orphaned)
1737 * then rate is set to zero.
1738 */
1739 if (clk->ops->recalc_rate)
1740 clk->rate = clk->ops->recalc_rate(clk->hw,
1741 __clk_get_rate(clk->parent));
1742 else if (clk->parent)
1743 clk->rate = clk->parent->rate;
1744 else
1745 clk->rate = 0;
1746
Stephen Boyd3a5aec22013-10-16 00:40:03 -07001747 clk_debug_register(clk);
Mike Turquetteb24764902012-03-15 23:11:19 -07001748 /*
1749 * walk the list of orphan clocks and reparent any that are children of
1750 * this clock
1751 */
Sasha Levinb67bfe02013-02-27 17:06:00 -08001752 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
Alex Elder12d298862013-09-05 08:33:24 -05001753 if (orphan->num_parents && orphan->ops->get_parent) {
Martin Fuzzey1f61e5f2012-11-22 20:15:05 +01001754 i = orphan->ops->get_parent(orphan->hw);
1755 if (!strcmp(clk->name, orphan->parent_names[i]))
1756 __clk_reparent(orphan, clk);
1757 continue;
1758 }
1759
Mike Turquetteb24764902012-03-15 23:11:19 -07001760 for (i = 0; i < orphan->num_parents; i++)
1761 if (!strcmp(clk->name, orphan->parent_names[i])) {
1762 __clk_reparent(orphan, clk);
1763 break;
1764 }
Martin Fuzzey1f61e5f2012-11-22 20:15:05 +01001765 }
Mike Turquetteb24764902012-03-15 23:11:19 -07001766
1767 /*
1768 * optional platform-specific magic
1769 *
1770 * The .init callback is not used by any of the basic clock types, but
1771 * exists for weird hardware that must perform initialization magic.
1772 * Please consider other ways of solving initialization problems before
Peter Meerwald24ee1a02013-06-29 15:14:19 +02001773 * using this callback, as its use is discouraged.
Mike Turquetteb24764902012-03-15 23:11:19 -07001774 */
1775 if (clk->ops->init)
1776 clk->ops->init(clk->hw);
1777
Mike Turquetteb24764902012-03-15 23:11:19 -07001778out:
Mike Turquetteeab89f62013-03-28 13:59:01 -07001779 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001780
Mike Turquetted1302a32012-03-29 14:30:40 -07001781 return ret;
Mike Turquetteb24764902012-03-15 23:11:19 -07001782}
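/*
 * Illustrative sketch (assumption, not part of the original file): a
 * minimal set of rate ops that satisfies the sanity checks in __clk_init()
 * above -- .set_rate is only accepted together with .recalc_rate and one of
 * .round_rate/.determine_rate.  The 2:1 divider behaviour is invented for
 * the example; a real .set_rate would program hardware.
 */
static unsigned long example_div2_recalc_rate(struct clk_hw *hw,
                                              unsigned long parent_rate)
{
        return parent_rate / 2;
}

static long example_div2_round_rate(struct clk_hw *hw, unsigned long rate,
                                    unsigned long *parent_rate)
{
        return *parent_rate / 2;
}

static int example_div2_set_rate(struct clk_hw *hw, unsigned long rate,
                                 unsigned long parent_rate)
{
        return 0;       /* hardware write omitted in this sketch */
}

static const struct clk_ops example_div2_ops = {
        .recalc_rate    = example_div2_recalc_rate,
        .round_rate     = example_div2_round_rate,
        .set_rate       = example_div2_set_rate,
};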
1783
1784/**
Saravana Kannan0197b3e2012-04-25 22:58:56 -07001785 * __clk_register - register a clock and return a cookie.
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
1786 *
1787 * Same as clk_register, except that the .clk field inside hw shall point to a
1788 * preallocated (generally statically allocated) struct clk. None of the fields
1789 * of the struct clk need to be initialized.
1790 *
1791 * The data pointed to by the .init and .clk fields shall NOT be marked as init
1792 * data.
1793 *
1794 * __clk_register is only exposed via clk-private.h and is intended for use with
1795 * very large numbers of clocks that need to be statically initialized. It is
1796 * a layering violation to include clk-private.h from any code which implements
1797 * a clock's .ops; as such any statically initialized clock data MUST be in a
Peter Meerwald24ee1a02013-06-29 15:14:19 +02001798 * separate C file from the logic that implements its operations. Returns the
Saravana Kannan0197b3e2012-04-25 22:58:56 -07001799 * registered struct clk on success, or an ERR_PTR()-encoded error otherwise.
1800 */
1801struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
1802{
1803 int ret;
1804 struct clk *clk;
1805
1806 clk = hw->clk;
1807 clk->name = hw->init->name;
1808 clk->ops = hw->init->ops;
1809 clk->hw = hw;
1810 clk->flags = hw->init->flags;
1811 clk->parent_names = hw->init->parent_names;
1812 clk->num_parents = hw->init->num_parents;
1813
1814 ret = __clk_init(dev, clk);
1815 if (ret)
1816 return ERR_PTR(ret);
1817
1818 return clk;
1819}
1820EXPORT_SYMBOL_GPL(__clk_register);
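/*
 * Illustrative sketch (assumption, not part of the original file): the
 * statically-allocated form consumed by __clk_register().  All names are
 * invented; per the note above, such data would normally live in a C file
 * separate from the ops implementation.
 */
static const struct clk_ops example_passthrough_ops = {
        /* no callbacks: a simple single-parent pass-through */
};

static struct clk example_static_clk;   /* fields filled in by __clk_register */

static const char *example_static_parents[] = { "example_osc" };

static const struct clk_init_data example_static_init = {
        .name           = "example_static",
        .ops            = &example_passthrough_ops,
        .parent_names   = example_static_parents,
        .num_parents    = 1,
};

static struct clk_hw example_static_hw = {
        .clk    = &example_static_clk,
        .init   = &example_static_init,
};

static struct clk *example_register_static(void)
{
        return __clk_register(NULL, &example_static_hw);
}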
1821
Stephen Boyd46c87732012-09-24 13:38:04 -07001822static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
Mike Turquetteb24764902012-03-15 23:11:19 -07001823{
Mike Turquetted1302a32012-03-29 14:30:40 -07001824 int i, ret;
Mike Turquetteb24764902012-03-15 23:11:19 -07001825
Saravana Kannan0197b3e2012-04-25 22:58:56 -07001826 clk->name = kstrdup(hw->init->name, GFP_KERNEL);
1827 if (!clk->name) {
1828 pr_err("%s: could not allocate clk->name\n", __func__);
1829 ret = -ENOMEM;
1830 goto fail_name;
1831 }
1832 clk->ops = hw->init->ops;
Mike Turquetteb24764902012-03-15 23:11:19 -07001833 clk->hw = hw;
Saravana Kannan0197b3e2012-04-25 22:58:56 -07001834 clk->flags = hw->init->flags;
1835 clk->num_parents = hw->init->num_parents;
Mike Turquetteb24764902012-03-15 23:11:19 -07001836 hw->clk = clk;
1837
Mike Turquetted1302a32012-03-29 14:30:40 -07001838 /* allocate local copy in case parent_names is __initdata */
Tomasz Figa96a7ed92013-09-29 02:37:15 +02001839 clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
1840 GFP_KERNEL);
Mike Turquetteb24764902012-03-15 23:11:19 -07001841
Mike Turquetted1302a32012-03-29 14:30:40 -07001842 if (!clk->parent_names) {
1843 pr_err("%s: could not allocate clk->parent_names\n", __func__);
1844 ret = -ENOMEM;
1845 goto fail_parent_names;
1846 }
1847
1848
1849 /* copy each string name in case parent_names is __initdata */
Saravana Kannan0197b3e2012-04-25 22:58:56 -07001850 for (i = 0; i < clk->num_parents; i++) {
1851 clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
1852 GFP_KERNEL);
Mike Turquetted1302a32012-03-29 14:30:40 -07001853 if (!clk->parent_names[i]) {
1854 pr_err("%s: could not copy parent_names\n", __func__);
1855 ret = -ENOMEM;
1856 goto fail_parent_names_copy;
1857 }
1858 }
1859
1860 ret = __clk_init(dev, clk);
1861 if (!ret)
Stephen Boyd46c87732012-09-24 13:38:04 -07001862 return 0;
Mike Turquetted1302a32012-03-29 14:30:40 -07001863
1864fail_parent_names_copy:
1865 while (--i >= 0)
1866 kfree(clk->parent_names[i]);
1867 kfree(clk->parent_names);
1868fail_parent_names:
Saravana Kannan0197b3e2012-04-25 22:58:56 -07001869 kfree(clk->name);
1870fail_name:
Stephen Boyd46c87732012-09-24 13:38:04 -07001871 return ret;
1872}
1873
1874/**
1875 * clk_register - allocate a new clock, register it and return an opaque cookie
1876 * @dev: device that is registering this clock
1877 * @hw: link to hardware-specific clock data
1878 *
1879 * clk_register is the primary interface for populating the clock tree with new
1880 * clock nodes. It returns a pointer to the newly allocated struct clk which
1881 * cannot be dereferenced by driver code but may be used in conjunction with the
1882 * rest of the clock API. In the event of an error clk_register will return an
1883 * error pointer; drivers must test for errors with IS_ERR() after calling clk_register.
1884 */
1885struct clk *clk_register(struct device *dev, struct clk_hw *hw)
1886{
1887 int ret;
1888 struct clk *clk;
1889
1890 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
1891 if (!clk) {
1892 pr_err("%s: could not allocate clk\n", __func__);
1893 ret = -ENOMEM;
1894 goto fail_out;
1895 }
1896
1897 ret = _clk_register(dev, hw, clk);
1898 if (!ret)
1899 return clk;
1900
Mike Turquetted1302a32012-03-29 14:30:40 -07001901 kfree(clk);
1902fail_out:
1903 return ERR_PTR(ret);
Mike Turquetteb24764902012-03-15 23:11:19 -07001904}
1905EXPORT_SYMBOL_GPL(clk_register);
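/*
 * Illustrative sketch (assumption, not part of the original file): a
 * hypothetical provider registering a root clock with clk_register().  In a
 * real driver the clk_hw is usually embedded in a private structure; the
 * name and rate are invented for the example.
 */
static unsigned long example_osc_recalc_rate(struct clk_hw *hw,
                                             unsigned long parent_rate)
{
        return 24000000;        /* pretend 24 MHz crystal */
}

static const struct clk_ops example_osc_ops = {
        .recalc_rate = example_osc_recalc_rate,
};

static const struct clk_init_data example_osc_init = {
        .name           = "example_osc",
        .ops            = &example_osc_ops,
        .num_parents    = 0,
        .flags          = CLK_IS_ROOT,
};

static struct clk_hw example_osc_hw = {
        .init = &example_osc_init,
};

static struct clk *example_register_osc(struct device *dev)
{
        return clk_register(dev, &example_osc_hw);
}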
1906
Mark Brown1df5c932012-04-18 09:07:12 +01001907/**
1908 * clk_unregister - unregister a currently registered clock
1909 * @clk: clock to unregister
1910 *
1911 * Currently unimplemented.
1912 */
1913void clk_unregister(struct clk *clk) {}
1914EXPORT_SYMBOL_GPL(clk_unregister);
1915
Stephen Boyd46c87732012-09-24 13:38:04 -07001916static void devm_clk_release(struct device *dev, void *res)
1917{
1918 clk_unregister(res);
1919}
1920
1921/**
1922 * devm_clk_register - resource managed clk_register()
1923 * @dev: device that is registering this clock
1924 * @hw: link to hardware-specific clock data
1925 *
1926 * Managed clk_register(). Clocks returned from this function are
1927 * automatically clk_unregister()ed on driver detach. See clk_register() for
1928 * more information.
1929 */
1930struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
1931{
1932 struct clk *clk;
1933 int ret;
1934
1935 clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
1936 if (!clk)
1937 return ERR_PTR(-ENOMEM);
1938
1939 ret = _clk_register(dev, hw, clk);
1940 if (!ret) {
1941 devres_add(dev, clk);
1942 } else {
1943 devres_free(clk);
1944 clk = ERR_PTR(ret);
1945 }
1946
1947 return clk;
1948}
1949EXPORT_SYMBOL_GPL(devm_clk_register);
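/*
 * Illustrative sketch (assumption, not part of the original file): the
 * devres-managed variant of the registration above; no explicit
 * clk_unregister() is needed on the error or remove paths.
 */
static int example_devm_register(struct device *dev, struct clk_hw *hw)
{
        struct clk *clk;

        clk = devm_clk_register(dev, hw);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        return 0;
}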
1950
1951static int devm_clk_match(struct device *dev, void *res, void *data)
1952{
1953 struct clk *c = res;
1954 if (WARN_ON(!c))
1955 return 0;
1956 return c == data;
1957}
1958
1959/**
1960 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device the clock was registered with
1961 * @clk: clock to unregister
1962 *
1963 * Deallocate a clock allocated with devm_clk_register(). Normally
1964 * this function will not need to be called and the resource management
1965 * code will ensure that the resource is freed.
1966 */
1967void devm_clk_unregister(struct device *dev, struct clk *clk)
1968{
1969 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
1970}
1971EXPORT_SYMBOL_GPL(devm_clk_unregister);
1972
Mike Turquetteb24764902012-03-15 23:11:19 -07001973/*** clk rate change notifiers ***/
1974
1975/**
1976 * clk_notifier_register - add a clk rate change notifier
1977 * @clk: struct clk * to watch
1978 * @nb: struct notifier_block * with callback info
1979 *
1980 * Request notification when clk's rate changes. This uses an SRCU
1981 * notifier because we want it to block and notifier unregistrations are
1982 * uncommon. The callbacks associated with the notifier must not
1983 * re-enter into the clk framework by calling any top-level clk APIs;
1984 * this will cause nested locking of the prepare_lock mutex.
1985 *
1986 * Pre-change notifier callbacks will be passed the current, pre-change
1987 * rate of the clk via struct clk_notifier_data.old_rate. The new,
1988 * post-change rate of the clk is passed via struct
1989 * clk_notifier_data.new_rate.
1990 *
1991 * Post-change notifiers will pass the now-current, post-change rate of
1992 * the clk in both struct clk_notifier_data.old_rate and struct
1993 * clk_notifier_data.new_rate.
1994 *
1995 * Abort-change notifiers are effectively the opposite of pre-change
1996 * notifiers: the original pre-change clk rate is passed in via struct
1997 * clk_notifier_data.new_rate and the failed post-change rate is passed
1998 * in via struct clk_notifier_data.old_rate.
1999 *
2000 * clk_notifier_register() must be called from non-atomic context.
2001 * Returns -EINVAL if called with null arguments, -ENOMEM upon
2002 * allocation failure; otherwise, passes along the return value of
2003 * srcu_notifier_chain_register().
2004 */
2005int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
2006{
2007 struct clk_notifier *cn;
2008 int ret = -ENOMEM;
2009
2010 if (!clk || !nb)
2011 return -EINVAL;
2012
Mike Turquetteeab89f62013-03-28 13:59:01 -07002013 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07002014
2015 /* search the list of notifiers for this clk */
2016 list_for_each_entry(cn, &clk_notifier_list, node)
2017 if (cn->clk == clk)
2018 break;
2019
2020 /* if clk wasn't in the notifier list, allocate new clk_notifier */
2021 if (cn->clk != clk) {
2022 cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
2023 if (!cn)
2024 goto out;
2025
2026 cn->clk = clk;
2027 srcu_init_notifier_head(&cn->notifier_head);
2028
2029 list_add(&cn->node, &clk_notifier_list);
2030 }
2031
2032 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
2033
2034 clk->notifier_count++;
2035
2036out:
Mike Turquetteeab89f62013-03-28 13:59:01 -07002037 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07002038
2039 return ret;
2040}
2041EXPORT_SYMBOL_GPL(clk_notifier_register);
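/*
 * Illustrative sketch (assumption, not part of the original file): a
 * hypothetical rate-change notifier callback registered with
 * clk_notifier_register(clk, &example_rate_nb).  A PRE_RATE_CHANGE callback
 * may veto the change by returning NOTIFY_BAD; a POST_RATE_CHANGE callback
 * sees the new rate in both old_rate and new_rate, as documented above.
 * The 200 MHz limit is invented for the example.
 */
static int example_rate_notifier(struct notifier_block *nb,
                                 unsigned long event, void *data)
{
        struct clk_notifier_data *cnd = data;

        switch (event) {
        case PRE_RATE_CHANGE:
                if (cnd->new_rate > 200000000)
                        return NOTIFY_BAD;
                return NOTIFY_OK;
        case POST_RATE_CHANGE:
                pr_debug("clk rate is now %lu\n", cnd->new_rate);
                return NOTIFY_OK;
        default:
                return NOTIFY_DONE;
        }
}

static struct notifier_block example_rate_nb = {
        .notifier_call = example_rate_notifier,
};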
2042
2043/**
2044 * clk_notifier_unregister - remove a clk rate change notifier
2045 * @clk: struct clk *
2046 * @nb: struct notifier_block * with callback info
2047 *
2048 * Request no further notification for changes to 'clk' and free the memory
2049 * allocated in clk_notifier_register().
2050 *
2051 * Returns -EINVAL if called with null arguments; otherwise, passes
2052 * along the return value of srcu_notifier_chain_unregister().
2053 */
2054int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
2055{
2056 struct clk_notifier *cn = NULL;
2057 int ret = -EINVAL;
2058
2059 if (!clk || !nb)
2060 return -EINVAL;
2061
Mike Turquetteeab89f62013-03-28 13:59:01 -07002062 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07002063
2064 list_for_each_entry(cn, &clk_notifier_list, node)
2065 if (cn->clk == clk)
2066 break;
2067
2068 if (cn->clk == clk) {
2069 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
2070
2071 clk->notifier_count--;
2072
2073 /* XXX the notifier code should handle this better */
2074 if (!cn->notifier_head.head) {
2075 srcu_cleanup_notifier_head(&cn->notifier_head);
Lai Jiangshan72b53222013-06-03 17:17:15 +08002076 list_del(&cn->node);
Mike Turquetteb24764902012-03-15 23:11:19 -07002077 kfree(cn);
2078 }
2079
2080 } else {
2081 ret = -ENOENT;
2082 }
2083
Mike Turquetteeab89f62013-03-28 13:59:01 -07002084 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07002085
2086 return ret;
2087}
2088EXPORT_SYMBOL_GPL(clk_notifier_unregister);
Grant Likely766e6a42012-04-09 14:50:06 -05002089
2090#ifdef CONFIG_OF
2091/**
2092 * struct of_clk_provider - Clock provider registration structure
2093 * @link: Entry in global list of clock providers
2094 * @node: Pointer to device tree node of clock provider
2095 * @get: Get clock callback. Returns NULL or a struct clk for the
2096 * given clock specifier
2097 * @data: context pointer to be passed into @get callback
2098 */
2099struct of_clk_provider {
2100 struct list_head link;
2101
2102 struct device_node *node;
2103 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
2104 void *data;
2105};
2106
Prashant Gaikwadf2f6c252013-01-04 12:30:52 +05302107extern struct of_device_id __clk_of_table[];
2108
2109static const struct of_device_id __clk_of_table_sentinel
2110 __used __section(__clk_of_table_end);
2111
Grant Likely766e6a42012-04-09 14:50:06 -05002112static LIST_HEAD(of_clk_providers);
2113static DEFINE_MUTEX(of_clk_lock);
2114
2115struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
2116 void *data)
2117{
2118 return data;
2119}
2120EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
2121
Shawn Guo494bfec2012-08-22 21:36:27 +08002122struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
2123{
2124 struct clk_onecell_data *clk_data = data;
2125 unsigned int idx = clkspec->args[0];
2126
2127 if (idx >= clk_data->clk_num) {
2128 pr_err("%s: invalid clock index %u\n", __func__, idx);
2129 return ERR_PTR(-EINVAL);
2130 }
2131
2132 return clk_data->clks[idx];
2133}
2134EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
2135
Grant Likely766e6a42012-04-09 14:50:06 -05002136/**
2137 * of_clk_add_provider() - Register a clock provider for a node
2138 * @np: Device node pointer associated with clock provider
2139 * @clk_src_get: callback for decoding clock
2140 * @data: context pointer for @clk_src_get callback.
2141 */
2142int of_clk_add_provider(struct device_node *np,
2143 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
2144 void *data),
2145 void *data)
2146{
2147 struct of_clk_provider *cp;
2148
2149 cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
2150 if (!cp)
2151 return -ENOMEM;
2152
2153 cp->node = of_node_get(np);
2154 cp->data = data;
2155 cp->get = clk_src_get;
2156
2157 mutex_lock(&of_clk_lock);
2158 list_add(&cp->link, &of_clk_providers);
2159 mutex_unlock(&of_clk_lock);
2160 pr_debug("Added clock from %s\n", np->full_name);
2161
2162 return 0;
2163}
2164EXPORT_SYMBOL_GPL(of_clk_add_provider);
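/*
 * Illustrative sketch (assumption, not part of the original file): a
 * hypothetical DT clock provider exposing an array of clocks through
 * of_clk_src_onecell_get().  Consumers then reference the clocks as
 * <&provider index> in the device tree.  The size and names are invented.
 */
#define EXAMPLE_NR_CLKS 4

static struct clk *example_clks[EXAMPLE_NR_CLKS];

static struct clk_onecell_data example_clk_data = {
        .clks           = example_clks,
        .clk_num        = EXAMPLE_NR_CLKS,
};

static int example_add_provider(struct device_node *np)
{
        /* example_clks[] is assumed to have been filled by clk_register() */
        return of_clk_add_provider(np, of_clk_src_onecell_get,
                                   &example_clk_data);
}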
2165
2166/**
2167 * of_clk_del_provider() - Remove a previously registered clock provider
2168 * @np: Device node pointer associated with clock provider
2169 */
2170void of_clk_del_provider(struct device_node *np)
2171{
2172 struct of_clk_provider *cp;
2173
2174 mutex_lock(&of_clk_lock);
2175 list_for_each_entry(cp, &of_clk_providers, link) {
2176 if (cp->node == np) {
2177 list_del(&cp->link);
2178 of_node_put(cp->node);
2179 kfree(cp);
2180 break;
2181 }
2182 }
2183 mutex_unlock(&of_clk_lock);
2184}
2185EXPORT_SYMBOL_GPL(of_clk_del_provider);
2186
2187struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
2188{
2189 struct of_clk_provider *provider;
2190 struct clk *clk = ERR_PTR(-ENOENT);
2191
2192 /* Check if we have such a provider in our array */
2193 mutex_lock(&of_clk_lock);
2194 list_for_each_entry(provider, &of_clk_providers, link) {
2195 if (provider->node == clkspec->np)
2196 clk = provider->get(clkspec, provider->data);
2197 if (!IS_ERR(clk))
2198 break;
2199 }
2200 mutex_unlock(&of_clk_lock);
2201
2202 return clk;
2203}
2204
Mike Turquettef6102742013-10-07 23:12:13 -07002205int of_clk_get_parent_count(struct device_node *np)
2206{
2207 return of_count_phandle_with_args(np, "clocks", "#clock-cells");
2208}
2209EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
2210
Grant Likely766e6a42012-04-09 14:50:06 -05002211const char *of_clk_get_parent_name(struct device_node *np, int index)
2212{
2213 struct of_phandle_args clkspec;
2214 const char *clk_name;
2215 int rc;
2216
2217 if (index < 0)
2218 return NULL;
2219
2220 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
2221 &clkspec);
2222 if (rc)
2223 return NULL;
2224
2225 if (of_property_read_string_index(clkspec.np, "clock-output-names",
2226 clkspec.args_count ? clkspec.args[0] : 0,
2227 &clk_name) < 0)
2228 clk_name = clkspec.np->name;
2229
2230 of_node_put(clkspec.np);
2231 return clk_name;
2232}
2233EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
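/*
 * Illustrative sketch (assumption, not part of the original file): a
 * provider building its parent_names[] array from the DT "clocks" property
 * using the two helpers above.
 */
static const char **example_get_parent_names(struct device_node *np,
                                             int *num_parents)
{
        const char **parent_names;
        int i, count;

        count = of_clk_get_parent_count(np);
        if (count <= 0)
                return NULL;

        parent_names = kcalloc(count, sizeof(*parent_names), GFP_KERNEL);
        if (!parent_names)
                return NULL;

        for (i = 0; i < count; i++)
                parent_names[i] = of_clk_get_parent_name(np, i);

        *num_parents = count;
        return parent_names;
}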
2234
2235/**
2236 * of_clk_init() - Scan and init clock providers from the DT
2237 * @matches: array of compatible values and init functions for providers.
2238 *
2239 * This function scans the device tree for matching clock providers and
2240 * calls their initialization functions
2241 */
2242void __init of_clk_init(const struct of_device_id *matches)
2243{
Alex Elder7f7ed582013-08-22 11:31:31 -05002244 const struct of_device_id *match;
Grant Likely766e6a42012-04-09 14:50:06 -05002245 struct device_node *np;
2246
Prashant Gaikwadf2f6c252013-01-04 12:30:52 +05302247 if (!matches)
2248 matches = __clk_of_table;
2249
Alex Elder7f7ed582013-08-22 11:31:31 -05002250 for_each_matching_node_and_match(np, matches, &match) {
Grant Likely766e6a42012-04-09 14:50:06 -05002251 of_clk_init_cb_t clk_init_cb = match->data;
2252 clk_init_cb(np);
2253 }
2254}
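/*
 * Illustrative sketch (assumption, not part of the original file): a
 * hypothetical provider hooking into of_clk_init() via CLK_OF_DECLARE(),
 * which places a matching entry in __clk_of_table.  The compatible string
 * and fixed rate are invented for the example.
 */
static void __init example_of_clk_init(struct device_node *np)
{
        struct clk *clk;

        clk = clk_register_fixed_rate(NULL, np->name, NULL, CLK_IS_ROOT,
                                      24000000);
        if (!IS_ERR(clk))
                of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
CLK_OF_DECLARE(example_clk, "example,fixed-osc", example_of_clk_init);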
2255#endif