blob: 898052ee0efade136d385e3e8e3a158b36750ebd [file] [log] [blame]
Mike Turquetteb24764902012-03-15 23:11:19 -07001/*
2 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
3 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Standard functionality for the common clock API. See Documentation/clk.txt
10 */
11
Stephen Boyd3c373112015-06-19 15:00:46 -070012#include <linux/clk.h>
Michael Turquetteb09d6d92015-01-29 14:22:50 -080013#include <linux/clk-provider.h>
Sylwester Nawrocki86be4082014-06-18 17:29:32 +020014#include <linux/clk/clk-conf.h>
Mike Turquetteb24764902012-03-15 23:11:19 -070015#include <linux/module.h>
16#include <linux/mutex.h>
17#include <linux/spinlock.h>
18#include <linux/err.h>
19#include <linux/list.h>
20#include <linux/slab.h>
Grant Likely766e6a42012-04-09 14:50:06 -050021#include <linux/of.h>
Stephen Boyd46c87732012-09-24 13:38:04 -070022#include <linux/device.h>
Prashant Gaikwadf2f6c252013-01-04 12:30:52 +053023#include <linux/init.h>
Mike Turquette533ddeb2013-03-28 13:59:02 -070024#include <linux/sched.h>
Stephen Boyd562ef0b2015-05-01 12:16:14 -070025#include <linux/clkdev.h>
Mike Turquetteb24764902012-03-15 23:11:19 -070026
Sylwester Nawrockid6782c22013-08-23 17:03:43 +020027#include "clk.h"
28
Mike Turquetteb24764902012-03-15 23:11:19 -070029static DEFINE_SPINLOCK(enable_lock);
30static DEFINE_MUTEX(prepare_lock);
31
Mike Turquette533ddeb2013-03-28 13:59:02 -070032static struct task_struct *prepare_owner;
33static struct task_struct *enable_owner;
34
35static int prepare_refcnt;
36static int enable_refcnt;
37
Mike Turquetteb24764902012-03-15 23:11:19 -070038static HLIST_HEAD(clk_root_list);
39static HLIST_HEAD(clk_orphan_list);
40static LIST_HEAD(clk_notifier_list);
41
Michael Turquetteb09d6d92015-01-29 14:22:50 -080042/*** private data structures ***/
43
/*
 * struct clk_core - the single, shared representation of one clock node.
 * There is exactly one clk_core per registered clock; the per-consumer
 * struct clk handles all point at it.
 */
struct clk_core {
	const char *name;		/* globally unique clock name */
	const struct clk_ops *ops;	/* hardware operations */
	struct clk_hw *hw;		/* provider's hardware handle */
	struct module *owner;		/* module providing this clock */
	struct clk_core *parent;	/* current parent, NULL if orphan/root */
	const char **parent_names;	/* possible parents, by name */
	struct clk_core **parents;	/* lazily-resolved parent cores */
	u8 num_parents;			/* length of the two arrays above */
	u8 new_parent_index;		/* staged parent index during rate change — TODO confirm against set_rate path */
	unsigned long rate;		/* cached current rate */
	unsigned long req_rate;		/* rate last requested by a consumer — presumably; verify against clk_set_rate */
	unsigned long new_rate;		/* staged rate during a rate change */
	struct clk_core *new_parent;	/* staged parent during a rate change */
	struct clk_core *new_child;	/* staged child during a rate change */
	unsigned long flags;		/* CLK_* framework flags */
	unsigned int enable_count;	/* software enable refcount */
	unsigned int prepare_count;	/* software prepare refcount */
	unsigned long min_rate;		/* provider-imposed rate floor */
	unsigned long max_rate;		/* provider-imposed rate ceiling */
	unsigned long accuracy;		/* clock accuracy (ppb) — NOTE(review): units not visible here; confirm */
	int phase;			/* phase shift in degrees — presumably; confirm */
	struct hlist_head children;	/* clocks parented to this one */
	struct hlist_node child_node;	/* entry in parent's children list */
	struct hlist_head clks;		/* per-consumer struct clk handles */
	unsigned int notifier_count;	/* registered rate-change notifiers */
#ifdef CONFIG_DEBUG_FS
	struct dentry *dentry;		/* debugfs directory for this clock */
	struct hlist_node debug_node;	/* entry in the debugfs clock list */
#endif
	struct kref ref;		/* lifetime refcount for the core */
};
76
Stephen Boyddfc202e2015-02-02 14:37:41 -080077#define CREATE_TRACE_POINTS
78#include <trace/events/clk.h>
79
/*
 * struct clk - per-consumer clock handle. Each clk_get() user receives
 * its own struct clk pointing at the shared clk_core, allowing per-user
 * rate constraints to be tracked independently.
 */
struct clk {
	struct clk_core *core;		/* shared clock implementation */
	const char *dev_id;		/* device id from lookup — presumably clkdev; confirm */
	const char *con_id;		/* connection id from lookup — presumably clkdev; confirm */
	unsigned long min_rate;		/* this consumer's rate floor */
	unsigned long max_rate;		/* this consumer's rate ceiling */
	struct hlist_node clks_node;	/* entry in core->clks */
};
88
Mike Turquetteeab89f62013-03-28 13:59:01 -070089/*** locking ***/
/*
 * Take the global prepare mutex with support for recursive acquisition:
 * if the current task already owns the lock, only the refcount is bumped,
 * avoiding self-deadlock when framework code re-enters.
 */
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		/* re-entry by the owner: count the nesting level instead */
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	/* first (outermost) acquisition: record ownership */
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}
104
/*
 * Release one level of the recursive prepare lock; the mutex itself is
 * only dropped when the outermost acquisition unwinds.
 */
static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	/* inner nested unlock: just drop the refcount */
	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}
115
/*
 * Take the global enable spinlock (IRQs disabled), with recursive
 * acquisition support mirroring clk_prepare_lock(). The __acquire()
 * annotation keeps sparse's lock balance checking happy on the
 * early-return re-entry path.
 */
static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&enable_lock, flags)) {
		/*
		 * Re-entry by the owning context: bump the refcount. The
		 * returned flags are only consumed by the outermost
		 * clk_enable_unlock(), which is the one that restores IRQs.
		 */
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}
135
/*
 * Release one level of the recursive enable lock; only the outermost
 * release actually unlocks the spinlock and restores IRQ flags.
 */
static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	/* inner nested unlock: drop refcount, satisfy sparse, keep lock */
	if (--enable_refcnt) {
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}
149
Stephen Boyd4dff95d2015-04-30 14:43:22 -0700150static bool clk_core_is_prepared(struct clk_core *core)
Prashant Gaikwad1af599d2012-12-26 19:16:22 +0530151{
Stephen Boyd4dff95d2015-04-30 14:43:22 -0700152 /*
153 * .is_prepared is optional for clocks that can prepare
154 * fall back to software usage counter if it is missing
155 */
156 if (!core->ops->is_prepared)
157 return core->prepare_count;
Prashant Gaikwad1af599d2012-12-26 19:16:22 +0530158
Stephen Boyd4dff95d2015-04-30 14:43:22 -0700159 return core->ops->is_prepared(core->hw);
Prashant Gaikwad1af599d2012-12-26 19:16:22 +0530160}
161
Stephen Boyd4dff95d2015-04-30 14:43:22 -0700162static bool clk_core_is_enabled(struct clk_core *core)
Prashant Gaikwad1af599d2012-12-26 19:16:22 +0530163{
Stephen Boyd4dff95d2015-04-30 14:43:22 -0700164 /*
165 * .is_enabled is only mandatory for clocks that gate
166 * fall back to software usage counter if .is_enabled is missing
167 */
168 if (!core->ops->is_enabled)
169 return core->enable_count;
Prashant Gaikwad1af599d2012-12-26 19:16:22 +0530170
Stephen Boyd4dff95d2015-04-30 14:43:22 -0700171 return core->ops->is_enabled(core->hw);
Prashant Gaikwad1af599d2012-12-26 19:16:22 +0530172}
173
/*
 * Recursively unprepare every clock in the subtree rooted at @core that
 * is hardware-prepared but has no software prepare reference (bottom-up,
 * children first). Clocks marked CLK_IGNORE_UNUSED are left alone.
 * Caller must hold the prepare lock.
 */
static void clk_unprepare_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	/* handle children before this node so parents outlive users */
	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)
		return;

	if (core->flags & CLK_IGNORE_UNUSED)
		return;

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		/* prefer the dedicated unprepare_unused hook if provided */
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
	}
}
198
/*
 * Recursively gate every clock in the subtree rooted at @core that is
 * hardware-enabled but has no software enable reference (bottom-up,
 * children first). Clocks marked CLK_IGNORE_UNUSED are left alone.
 * Caller must hold the prepare lock; the enable lock is taken per node.
 */
static void clk_disable_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	/* gate children before this node so parents outlive users */
	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;

	if (core->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence. call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
	}

unlock_out:
	clk_enable_unlock(flags);
}
234
/*
 * "clk_ignore_unused" on the kernel command line skips the late-init
 * pass that gates clocks left running by firmware but unclaimed by any
 * driver — useful when debugging clocks without complete driver support.
 */
static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;	/* 1 = parameter handled, per __setup() convention */
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);
242
/*
 * Late-initcall sweep that gates and unprepares every clock (in both the
 * root and orphan trees) that hardware reports as on but that no driver
 * has claimed. Disable passes run before unprepare passes, matching the
 * required clk_disable-before-clk_unprepare ordering.
 */
static int clk_disable_unused(void)
{
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	/* pass 1: gate (disable) unused clocks in both trees */
	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	/* pass 2: unprepare unused clocks, now that nothing is enabled */
	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();

	return 0;
}
/* _sync so all device probing (and thus clk claiming) has finished */
late_initcall_sync(clk_disable_unused);
Mike Turquetteb24764902012-03-15 23:11:19 -0700271
272/*** helper functions ***/
273
Russ Dill65800b22012-11-26 11:20:09 -0800274const char *__clk_get_name(struct clk *clk)
Mike Turquetteb24764902012-03-15 23:11:19 -0700275{
Tomeu Vizoso035a61c2015-01-23 12:03:30 +0100276 return !clk ? NULL : clk->core->name;
Mike Turquetteb24764902012-03-15 23:11:19 -0700277}
Niels de Vos48950842012-12-13 13:12:25 +0100278EXPORT_SYMBOL_GPL(__clk_get_name);
Mike Turquetteb24764902012-03-15 23:11:19 -0700279
Russ Dill65800b22012-11-26 11:20:09 -0800280struct clk_hw *__clk_get_hw(struct clk *clk)
Mike Turquetteb24764902012-03-15 23:11:19 -0700281{
Tomeu Vizoso035a61c2015-01-23 12:03:30 +0100282 return !clk ? NULL : clk->core->hw;
Mike Turquetteb24764902012-03-15 23:11:19 -0700283}
Stephen Boyd0b7f04b2014-01-17 19:47:17 -0800284EXPORT_SYMBOL_GPL(__clk_get_hw);
Mike Turquetteb24764902012-03-15 23:11:19 -0700285
Russ Dill65800b22012-11-26 11:20:09 -0800286u8 __clk_get_num_parents(struct clk *clk)
Mike Turquetteb24764902012-03-15 23:11:19 -0700287{
Tomeu Vizoso035a61c2015-01-23 12:03:30 +0100288 return !clk ? 0 : clk->core->num_parents;
Mike Turquetteb24764902012-03-15 23:11:19 -0700289}
Stephen Boyd0b7f04b2014-01-17 19:47:17 -0800290EXPORT_SYMBOL_GPL(__clk_get_num_parents);
Mike Turquetteb24764902012-03-15 23:11:19 -0700291
Russ Dill65800b22012-11-26 11:20:09 -0800292struct clk *__clk_get_parent(struct clk *clk)
Mike Turquetteb24764902012-03-15 23:11:19 -0700293{
Tomeu Vizoso035a61c2015-01-23 12:03:30 +0100294 if (!clk)
295 return NULL;
296
297 /* TODO: Create a per-user clk and change callers to call clk_put */
298 return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
Mike Turquetteb24764902012-03-15 23:11:19 -0700299}
Stephen Boyd0b7f04b2014-01-17 19:47:17 -0800300EXPORT_SYMBOL_GPL(__clk_get_parent);
Mike Turquetteb24764902012-03-15 23:11:19 -0700301
Stephen Boyd4dff95d2015-04-30 14:43:22 -0700302static struct clk_core *__clk_lookup_subtree(const char *name,
303 struct clk_core *core)
304{
305 struct clk_core *child;
306 struct clk_core *ret;
307
308 if (!strcmp(core->name, name))
309 return core;
310
311 hlist_for_each_entry(child, &core->children, child_node) {
312 ret = __clk_lookup_subtree(name, child);
313 if (ret)
314 return ret;
315 }
316
317 return NULL;
318}
319
320static struct clk_core *clk_core_lookup(const char *name)
321{
322 struct clk_core *root_clk;
323 struct clk_core *ret;
324
325 if (!name)
326 return NULL;
327
328 /* search the 'proper' clk tree first */
329 hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
330 ret = __clk_lookup_subtree(name, root_clk);
331 if (ret)
332 return ret;
333 }
334
335 /* if not found, then search the orphan tree */
336 hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
337 ret = __clk_lookup_subtree(name, root_clk);
338 if (ret)
339 return ret;
340 }
341
342 return NULL;
343}
344
Stephen Boydd6968fc2015-04-30 13:54:13 -0700345static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
Tomeu Vizoso035a61c2015-01-23 12:03:30 +0100346 u8 index)
James Hogan7ef3dcc2013-07-29 12:24:58 +0100347{
Stephen Boydd6968fc2015-04-30 13:54:13 -0700348 if (!core || index >= core->num_parents)
James Hogan7ef3dcc2013-07-29 12:24:58 +0100349 return NULL;
Stephen Boydd6968fc2015-04-30 13:54:13 -0700350 else if (!core->parents)
351 return clk_core_lookup(core->parent_names[index]);
352 else if (!core->parents[index])
353 return core->parents[index] =
354 clk_core_lookup(core->parent_names[index]);
James Hogan7ef3dcc2013-07-29 12:24:58 +0100355 else
Stephen Boydd6968fc2015-04-30 13:54:13 -0700356 return core->parents[index];
James Hogan7ef3dcc2013-07-29 12:24:58 +0100357}
Tomeu Vizoso035a61c2015-01-23 12:03:30 +0100358
359struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
360{
361 struct clk_core *parent;
362
363 if (!clk)
364 return NULL;
365
366 parent = clk_core_get_parent_by_index(clk->core, index);
367
368 return !parent ? NULL : parent->hw->clk;
369}
Stephen Boyd0b7f04b2014-01-17 19:47:17 -0800370EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
James Hogan7ef3dcc2013-07-29 12:24:58 +0100371
Russ Dill65800b22012-11-26 11:20:09 -0800372unsigned int __clk_get_enable_count(struct clk *clk)
Mike Turquetteb24764902012-03-15 23:11:19 -0700373{
Tomeu Vizoso035a61c2015-01-23 12:03:30 +0100374 return !clk ? 0 : clk->core->enable_count;
Mike Turquetteb24764902012-03-15 23:11:19 -0700375}
376
Stephen Boydd6968fc2015-04-30 13:54:13 -0700377static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
Mike Turquetteb24764902012-03-15 23:11:19 -0700378{
379 unsigned long ret;
380
Stephen Boydd6968fc2015-04-30 13:54:13 -0700381 if (!core) {
Rajendra Nayak34e44fe2012-03-26 19:01:48 +0530382 ret = 0;
Mike Turquetteb24764902012-03-15 23:11:19 -0700383 goto out;
384 }
385
Stephen Boydd6968fc2015-04-30 13:54:13 -0700386 ret = core->rate;
Mike Turquetteb24764902012-03-15 23:11:19 -0700387
Stephen Boydd6968fc2015-04-30 13:54:13 -0700388 if (core->flags & CLK_IS_ROOT)
Mike Turquetteb24764902012-03-15 23:11:19 -0700389 goto out;
390
Stephen Boydd6968fc2015-04-30 13:54:13 -0700391 if (!core->parent)
Rajendra Nayak34e44fe2012-03-26 19:01:48 +0530392 ret = 0;
Mike Turquetteb24764902012-03-15 23:11:19 -0700393
394out:
395 return ret;
396}
Tomeu Vizoso035a61c2015-01-23 12:03:30 +0100397
398unsigned long __clk_get_rate(struct clk *clk)
399{
400 if (!clk)
401 return 0;
402
403 return clk_core_get_rate_nolock(clk->core);
404}
Stephen Boyd0b7f04b2014-01-17 19:47:17 -0800405EXPORT_SYMBOL_GPL(__clk_get_rate);
Mike Turquetteb24764902012-03-15 23:11:19 -0700406
Stephen Boydd6968fc2015-04-30 13:54:13 -0700407static unsigned long __clk_get_accuracy(struct clk_core *core)
Boris BREZILLON5279fc42013-12-21 10:34:47 +0100408{
Stephen Boydd6968fc2015-04-30 13:54:13 -0700409 if (!core)
Boris BREZILLON5279fc42013-12-21 10:34:47 +0100410 return 0;
411
Stephen Boydd6968fc2015-04-30 13:54:13 -0700412 return core->accuracy;
Boris BREZILLON5279fc42013-12-21 10:34:47 +0100413}
414
Russ Dill65800b22012-11-26 11:20:09 -0800415unsigned long __clk_get_flags(struct clk *clk)
Mike Turquetteb24764902012-03-15 23:11:19 -0700416{
Tomeu Vizoso035a61c2015-01-23 12:03:30 +0100417 return !clk ? 0 : clk->core->flags;
Mike Turquetteb24764902012-03-15 23:11:19 -0700418}
Thierry Redingb05c6832013-09-03 09:43:51 +0200419EXPORT_SYMBOL_GPL(__clk_get_flags);
Mike Turquetteb24764902012-03-15 23:11:19 -0700420
Tomeu Vizoso035a61c2015-01-23 12:03:30 +0100421bool __clk_is_prepared(struct clk *clk)
422{
423 if (!clk)
424 return false;
425
426 return clk_core_is_prepared(clk->core);
427}
428
Tomeu Vizoso035a61c2015-01-23 12:03:30 +0100429bool __clk_is_enabled(struct clk *clk)
430{
431 if (!clk)
432 return false;
433
434 return clk_core_is_enabled(clk->core);
435}
Stephen Boyd0b7f04b2014-01-17 19:47:17 -0800436EXPORT_SYMBOL_GPL(__clk_is_enabled);
Mike Turquetteb24764902012-03-15 23:11:19 -0700437
Stephen Boyd15a02c12015-01-19 18:05:28 -0800438static bool mux_is_better_rate(unsigned long rate, unsigned long now,
439 unsigned long best, unsigned long flags)
James Hogane366fdd2013-07-29 12:25:02 +0100440{
Stephen Boyd15a02c12015-01-19 18:05:28 -0800441 if (flags & CLK_MUX_ROUND_CLOSEST)
442 return abs(now - rate) < abs(best - rate);
443
444 return now <= rate && now > best;
445}
446
/*
 * Shared worker for mux determine_rate implementations: pick the parent
 * (and resulting rate) that best satisfies @req, honoring
 * CLK_SET_RATE_NO_REPARENT and CLK_SET_RATE_PARENT, with the candidate
 * comparison policy selected by @flags (e.g. CLK_MUX_ROUND_CLOSEST).
 * On success fills req->best_parent_hw/best_parent_rate/rate and
 * returns 0; returns a negative errno if no parent can serve the request.
 */
static int
clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req,
			     unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			/* ask the current parent what rate it could deliver */
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
						   &parent_req);
			if (ret)
				return ret;

			best = parent_req.rate;
		} else if (parent) {
			best = clk_core_get_rate_nolock(parent);
		} else {
			/* orphan: fall back to our own cached rate */
			best = clk_core_get_rate_nolock(core);
		}

		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			/* reset the request each round; determine_rate mutates it */
			parent_req = *req;
			ret = __clk_determine_rate(parent->hw, &parent_req);
			if (ret)
				continue;
		} else {
			parent_req.rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_req.rate,
				       best, flags)) {
			best_parent = parent;
			best = parent_req.rate;
		}
	}

	/* no candidate parent could serve the request at all */
	if (!best_parent)
		return -EINVAL;

out:
	/* best_parent stays NULL on the NO_REPARENT pass-through path */
	if (best_parent)
		req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}
Stephen Boyd15a02c12015-01-19 18:05:28 -0800509
Tomeu Vizoso035a61c2015-01-23 12:03:30 +0100510struct clk *__clk_lookup(const char *name)
511{
512 struct clk_core *core = clk_core_lookup(name);
513
514 return !core ? NULL : core->hw->clk;
515}
516
Stephen Boydd6968fc2015-04-30 13:54:13 -0700517static void clk_core_get_boundaries(struct clk_core *core,
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +0100518 unsigned long *min_rate,
519 unsigned long *max_rate)
520{
521 struct clk *clk_user;
522
Stephen Boyd9783c0d2015-07-16 12:50:27 -0700523 *min_rate = core->min_rate;
524 *max_rate = core->max_rate;
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +0100525
Stephen Boydd6968fc2015-04-30 13:54:13 -0700526 hlist_for_each_entry(clk_user, &core->clks, clks_node)
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +0100527 *min_rate = max(*min_rate, clk_user->min_rate);
528
Stephen Boydd6968fc2015-04-30 13:54:13 -0700529 hlist_for_each_entry(clk_user, &core->clks, clks_node)
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +0100530 *max_rate = min(*max_rate, clk_user->max_rate);
531}
532
/*
 * Provider API: record the hardware rate limits for @hw's clock.
 * Per-consumer constraints can only narrow this range further
 * (see clk_core_get_boundaries()).
 */
void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
540
/*
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 *
 * Uses the default "fastest rate not above the target" selection policy.
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
James Hogane366fdd2013-07-29 12:25:02 +0100552
/*
 * Same as __clk_mux_determine_rate() but selects the parent whose rate
 * is closest to the target in either direction (CLK_MUX_ROUND_CLOSEST).
 */
int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
559
Mike Turquetteb24764902012-03-15 23:11:19 -0700560/*** clk api ***/
561
/*
 * Drop one prepare reference on @core; when the count reaches zero,
 * invoke the hardware ->unprepare() op and recursively release the
 * reference held on the parent. Caller must hold the prepare lock.
 */
static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	/* unbalanced unprepare: refuse rather than underflow */
	if (WARN_ON(core->prepare_count == 0))
		return;

	if (--core->prepare_count > 0)
		return;

	/* a clock must be disabled before it is unprepared */
	WARN_ON(core->enable_count > 0);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
}
585
/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep. One example is a clk which is accessed over
 * I2c. In the complex case a clk gate operation may require a fast and a slow
 * part. It is this reason that clk_unprepare and clk_disable are not mutually
 * exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	/* tolerate NULL and ERR_PTR handles from optional clk_get()s */
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_prepare_lock();
	clk_core_unprepare(clk->core);
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);
607
/*
 * Take one prepare reference on @core. On the 0 -> 1 transition the
 * parent is prepared first, then the hardware ->prepare() op runs; a
 * hardware failure rolls back the parent's reference. Caller must hold
 * the prepare lock. Returns 0 on success or a negative errno.
 */
static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		/* parents must be prepared before their children */
		ret = clk_core_prepare(core->parent);
		if (ret)
			return ret;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret) {
			/* hardware failed: release the parent ref we took */
			clk_core_unprepare(core->parent);
			return ret;
		}
	}

	core->prepare_count++;

	return 0;
}
639
/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep. One example is a clk which is accessed over I2c. In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is this reason that clk_prepare and clk_enable are not mutually
 * exclusive. In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	/* a NULL clk is a valid no-op handle */
	if (!clk)
		return 0;

	clk_prepare_lock();
	ret = clk_core_prepare(clk->core);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);
666
/*
 * Drop one enable reference on @core; when the count reaches zero,
 * invoke the hardware ->disable() op and recursively release the
 * reference held on the parent. Caller must hold the enable lock
 * (this path must not sleep).
 */
static void clk_core_disable(struct clk_core *core)
{
	lockdep_assert_held(&enable_lock);

	if (!core)
		return;

	/* unbalanced disable: refuse rather than underflow */
	if (WARN_ON(core->enable_count == 0))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete(core);

	clk_core_disable(core->parent);
}
689
/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep. One example is a
 * SoC-internal clk which is controlled via simple register writes. In the
 * complex case a clk gate operation may require a fast and a slow part. It is
 * this reason that clk_unprepare and clk_disable are not mutually exclusive.
 * In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	/* tolerate NULL and ERR_PTR handles from optional clk_get()s */
	if (IS_ERR_OR_NULL(clk))
		return;

	flags = clk_enable_lock();
	clk_core_disable(clk->core);
	clk_enable_unlock(flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
714
/*
 * Take one enable reference on @core. The clock must already be
 * prepared (enforced by WARN + -ESHUTDOWN). On the 0 -> 1 transition
 * the parent is enabled first, then the hardware ->enable() op runs;
 * a hardware failure rolls back the parent's reference. Caller must
 * hold the enable lock (this path must not sleep). Returns 0 on
 * success or a negative errno.
 */
static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&enable_lock);

	if (!core)
		return 0;

	/* enabling an unprepared clock is a caller bug */
	if (WARN_ON(core->prepare_count == 0))
		return -ESHUTDOWN;

	if (core->enable_count == 0) {
		/* parents must be enabled before their children */
		ret = clk_core_enable(core->parent);

		if (ret)
			return ret;

		trace_clk_enable(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete(core);

		if (ret) {
			/* hardware failed: release the parent ref we took */
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
}
749
750/**
751 * clk_enable - ungate a clock
752 * @clk: the clk being ungated
753 *
754 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
755 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
756 * if the operation will never sleep. One example is a SoC-internal clk which
757 * is controlled via simple register writes. In the complex case a clk ungate
758 * operation may require a fast and a slow part. It is this reason that
759 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
760 * must be called before clk_enable. Returns 0 on success, -EERROR
761 * otherwise.
762 */
763int clk_enable(struct clk *clk)
764{
765 unsigned long flags;
766 int ret;
767
Dong Aisheng864e1602015-04-30 14:02:19 -0700768 if (!clk)
769 return 0;
770
Mike Turquetteeab89f62013-03-28 13:59:01 -0700771 flags = clk_enable_lock();
Dong Aisheng864e1602015-04-30 14:02:19 -0700772 ret = clk_core_enable(clk->core);
Mike Turquetteeab89f62013-03-28 13:59:01 -0700773 clk_enable_unlock(flags);
Mike Turquetteb24764902012-03-15 23:11:19 -0700774
775 return ret;
776}
777EXPORT_SYMBOL_GPL(clk_enable);
778
/*
 * clk_core_round_rate_nolock - round a rate request for a clk
 * @core: the clk whose rate is being rounded (NULL is a no-op returning 0)
 * @req: the request; on entry ->rate holds the target rate, on success
 *       ->rate holds the rounded rate and the best_parent_* fields reflect
 *       the parent that should provide it
 *
 * Caller must hold the prepare lock. Returns 0 on success or a negative
 * error code propagated from the clk's .determine_rate/.round_rate callback.
 */
static int clk_core_round_rate_nolock(struct clk_core *core,
					struct clk_rate_request *req)
{
	struct clk_core *parent;
	long rate;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/* seed the request with the current parent, if there is one */
	parent = core->parent;
	if (parent) {
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
	} else {
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;
	}

	if (core->ops->determine_rate) {
		/* .determine_rate may also select a different parent */
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					&req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else if (core->flags & CLK_SET_RATE_PARENT) {
		/* no rate ops of our own: delegate rounding to the parent */
		return clk_core_round_rate_nolock(parent, req);
	} else {
		/* fixed clk: the only achievable rate is the current one */
		req->rate = core->rate;
	}

	return 0;
}
Tomeu Vizoso035a61c2015-01-23 12:03:30 +0100816
/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: rate request; ->rate holds the target rate and ->min_rate/->max_rate
 *       the boundaries; on return ->rate and the best_parent_* fields are
 *       filled in by the core
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 * A NULL @hw yields a zero rate and success.
 */
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	if (!hw) {
		req->rate = 0;
		return 0;
	}

	return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);
836
837/**
Tomeu Vizoso035a61c2015-01-23 12:03:30 +0100838 * __clk_round_rate - round the given rate for a clk
839 * @clk: round the rate of this clock
840 * @rate: the rate which is to be rounded
841 *
Stephen Boyd6e5ab412015-04-30 15:11:31 -0700842 * Useful for clk_ops such as .set_rate
Tomeu Vizoso035a61c2015-01-23 12:03:30 +0100843 */
844unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
845{
Boris Brezillon0817b622015-07-07 20:48:08 +0200846 struct clk_rate_request req;
847 int ret;
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +0100848
Tomeu Vizoso035a61c2015-01-23 12:03:30 +0100849 if (!clk)
850 return 0;
851
Boris Brezillon0817b622015-07-07 20:48:08 +0200852 clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
853 req.rate = rate;
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +0100854
Boris Brezillon0817b622015-07-07 20:48:08 +0200855 ret = clk_core_round_rate_nolock(clk->core, &req);
856 if (ret)
857 return 0;
858
859 return req.rate;
Tomeu Vizoso035a61c2015-01-23 12:03:30 +0100860}
Arnd Bergmann1cdf8ee2014-06-03 11:40:14 +0200861EXPORT_SYMBOL_GPL(__clk_round_rate);
Mike Turquetteb24764902012-03-15 23:11:19 -0700862
/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Rounds @rate to the nearest rate @clk can actually run at and returns it.
 * Clks without a round_rate operation simply report their parent's rate.
 * Takes the prepare lock around the lookup; NULL clks return 0.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long rounded;

	if (!clk)
		return 0;

	clk_prepare_lock();
	rounded = __clk_round_rate(clk, rate);
	clk_prepare_unlock();

	return rounded;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
886
/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'. Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback. Intended to be called by
 * internal clock code only. Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
		unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	/*
	 * Several struct clk handles may map to the same clk_core, so walk
	 * the whole list and fire every chain whose clk resolves to @core,
	 * passing each subscriber its own struct clk in the notifier data.
	 */
	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == core) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
		}
	}

	return ret;
}
921
/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes. Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 *
 * Caller must hold the prepare lock.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
{
	unsigned long parent_accuracy = 0;	/* root clks start from 0 */
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	if (core->parent)
		parent_accuracy = core->parent->accuracy;

	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
					parent_accuracy);
	else
		/* no callback: inherit the parent's accuracy */
		core->accuracy = parent_accuracy;

	/* propagate the new accuracy down the whole subtree */
	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_accuracies(child);
}
950
Stephen Boydd6968fc2015-04-30 13:54:13 -0700951static long clk_core_get_accuracy(struct clk_core *core)
Boris BREZILLON5279fc42013-12-21 10:34:47 +0100952{
953 unsigned long accuracy;
954
955 clk_prepare_lock();
Stephen Boydd6968fc2015-04-30 13:54:13 -0700956 if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
957 __clk_recalc_accuracies(core);
Boris BREZILLON5279fc42013-12-21 10:34:47 +0100958
Stephen Boydd6968fc2015-04-30 13:54:13 -0700959 accuracy = __clk_get_accuracy(core);
Boris BREZILLON5279fc42013-12-21 10:34:47 +0100960 clk_prepare_unlock();
961
962 return accuracy;
963}
Tomeu Vizoso035a61c2015-01-23 12:03:30 +0100964
965/**
966 * clk_get_accuracy - return the accuracy of clk
967 * @clk: the clk whose accuracy is being returned
968 *
969 * Simply returns the cached accuracy of the clk, unless
970 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
971 * issued.
972 * If clk is NULL then returns 0.
973 */
974long clk_get_accuracy(struct clk *clk)
975{
976 if (!clk)
977 return 0;
978
979 return clk_core_get_accuracy(clk->core);
980}
Boris BREZILLON5279fc42013-12-21 10:34:47 +0100981EXPORT_SYMBOL_GPL(clk_get_accuracy);
982
Stephen Boydd6968fc2015-04-30 13:54:13 -0700983static unsigned long clk_recalc(struct clk_core *core,
Tomeu Vizoso035a61c2015-01-23 12:03:30 +0100984 unsigned long parent_rate)
Stephen Boyd8f2c2db2014-03-26 16:06:36 -0700985{
Stephen Boydd6968fc2015-04-30 13:54:13 -0700986 if (core->ops->recalc_rate)
987 return core->ops->recalc_rate(core->hw, parent_rate);
Stephen Boyd8f2c2db2014-03-26 16:06:36 -0700988 return parent_rate;
989}
990
/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h); 0 suppresses
 *       notifications (used for uncached rate reads)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes. Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold the prepare lock.
 */
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = core->rate;

	if (core->parent)
		parent_rate = core->parent->rate;

	core->rate = clk_recalc(core, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);

	/* recurse so the whole subtree picks up the new rates */
	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, msg);
}
1028
Stephen Boydd6968fc2015-04-30 13:54:13 -07001029static unsigned long clk_core_get_rate(struct clk_core *core)
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001030{
1031 unsigned long rate;
1032
1033 clk_prepare_lock();
1034
Stephen Boydd6968fc2015-04-30 13:54:13 -07001035 if (core && (core->flags & CLK_GET_RATE_NOCACHE))
1036 __clk_recalc_rates(core, 0);
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001037
Stephen Boydd6968fc2015-04-30 13:54:13 -07001038 rate = clk_core_get_rate_nolock(core);
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001039 clk_prepare_unlock();
1040
1041 return rate;
1042}
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001043
Mike Turquetteb24764902012-03-15 23:11:19 -07001044/**
Ulf Hanssona093bde2012-08-31 14:21:28 +02001045 * clk_get_rate - return the rate of clk
1046 * @clk: the clk whose rate is being returned
1047 *
1048 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
1049 * is set, which means a recalc_rate will be issued.
1050 * If clk is NULL then returns 0.
1051 */
1052unsigned long clk_get_rate(struct clk *clk)
1053{
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001054 if (!clk)
1055 return 0;
Ulf Hanssona093bde2012-08-31 14:21:28 +02001056
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001057 return clk_core_get_rate(clk->core);
Ulf Hanssona093bde2012-08-31 14:21:28 +02001058}
1059EXPORT_SYMBOL_GPL(clk_get_rate);
1060
Stephen Boydd6968fc2015-04-30 13:54:13 -07001061static int clk_fetch_parent_index(struct clk_core *core,
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001062 struct clk_core *parent)
James Hogan4935b222013-07-29 12:24:59 +01001063{
Tomasz Figaf1c8b2e2013-09-29 02:37:14 +02001064 int i;
James Hogan4935b222013-07-29 12:24:59 +01001065
Stephen Boydd6968fc2015-04-30 13:54:13 -07001066 if (!core->parents) {
1067 core->parents = kcalloc(core->num_parents,
Tomasz Figa96a7ed92013-09-29 02:37:15 +02001068 sizeof(struct clk *), GFP_KERNEL);
Stephen Boydd6968fc2015-04-30 13:54:13 -07001069 if (!core->parents)
Tomasz Figaf1c8b2e2013-09-29 02:37:14 +02001070 return -ENOMEM;
1071 }
James Hogan4935b222013-07-29 12:24:59 +01001072
1073 /*
1074 * find index of new parent clock using cached parent ptrs,
1075 * or if not yet cached, use string name comparison and cache
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001076 * them now to avoid future calls to clk_core_lookup.
James Hogan4935b222013-07-29 12:24:59 +01001077 */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001078 for (i = 0; i < core->num_parents; i++) {
1079 if (core->parents[i] == parent)
Tomasz Figaf1c8b2e2013-09-29 02:37:14 +02001080 return i;
Tomasz Figada0f0b22013-09-29 02:37:16 +02001081
Stephen Boydd6968fc2015-04-30 13:54:13 -07001082 if (core->parents[i])
Tomasz Figada0f0b22013-09-29 02:37:16 +02001083 continue;
1084
Stephen Boydd6968fc2015-04-30 13:54:13 -07001085 if (!strcmp(core->parent_names[i], parent->name)) {
1086 core->parents[i] = clk_core_lookup(parent->name);
Tomasz Figaf1c8b2e2013-09-29 02:37:14 +02001087 return i;
James Hogan4935b222013-07-29 12:24:59 +01001088 }
1089 }
1090
Tomasz Figaf1c8b2e2013-09-29 02:37:14 +02001091 return -EINVAL;
James Hogan4935b222013-07-29 12:24:59 +01001092}
1093
/*
 * clk_reparent - splice @core under @new_parent in the clk tree topology
 *
 * Removes @core from its current parent's child list and adds it to
 * @new_parent's children (or to the global orphan list when @new_parent is
 * NULL), then records the new parent pointer. Caller handles locking.
 */
static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
{
	hlist_del(&core->child_node);

	if (new_parent) {
		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == core)
			new_parent->new_child = NULL;

		hlist_add_head(&core->child_node, &new_parent->children);
	} else {
		/* no parent: this clk becomes an orphan */
		hlist_add_head(&core->child_node, &clk_orphan_list);
	}

	core->parent = new_parent;
}
1110
/*
 * __clk_set_parent_before - first half of a parent switch
 *
 * Migrates prepare/enable state onto the new parent and re-links the tree
 * topology. Returns the old parent so __clk_set_parent_after() can release
 * the temporary references once the hardware switch has completed (or the
 * caller has rolled it back).
 */
static struct clk_core *__clk_set_parent_before(struct clk_core *core,
					  struct clk_core *parent)
{
	unsigned long flags;
	struct clk_core *old_parent = core->parent;

	/*
	 * Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on. This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */
	if (core->prepare_count) {
		clk_core_prepare(parent);
		flags = clk_enable_lock();
		clk_core_enable(parent);
		clk_core_enable(core);
		clk_enable_unlock(flags);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(core, parent);
	clk_enable_unlock(flags);

	return old_parent;
}
1149
/*
 * __clk_set_parent_after - second half of a parent switch
 *
 * Drops the temporary enable/prepare references that
 * __clk_set_parent_before() took on the clk and its old parent.
 * Note: @parent is unused here; presumably kept for symmetry with
 * __clk_set_parent_before() - confirm against callers before removing.
 */
static void __clk_set_parent_after(struct clk_core *core,
				   struct clk_core *parent,
				   struct clk_core *old_parent)
{
	unsigned long flags;

	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (core->prepare_count) {
		flags = clk_enable_lock();
		clk_core_disable(core);
		clk_core_disable(old_parent);
		clk_enable_unlock(flags);
		clk_core_unprepare(old_parent);
	}
}
1168
/*
 * __clk_set_parent - switch @core's parent to @parent (index @p_index)
 *
 * Runs the full reparent sequence: migrate prepare/enable state and the
 * tree topology via __clk_set_parent_before(), program the hardware mux
 * through .set_parent, then release the temporary references in
 * __clk_set_parent_after(). If the hardware call fails, the topology and
 * the temporary prepare/enable state are rolled back. Returns 0 on
 * success or the error from .set_parent.
 */
static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
			    u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk_core *old_parent;

	old_parent = __clk_set_parent_before(core, parent);

	trace_clk_set_parent(core, parent);

	/* change clock input source */
	if (parent && core->ops->set_parent)
		ret = core->ops->set_parent(core->hw, p_index);

	trace_clk_set_parent_complete(core, parent);

	if (ret) {
		/* restore the original topology ... */
		flags = clk_enable_lock();
		clk_reparent(core, old_parent);
		clk_enable_unlock(flags);

		/* ... and drop the references taken on the new parent */
		if (core->prepare_count) {
			flags = clk_enable_lock();
			clk_core_disable(core);
			clk_core_disable(parent);
			clk_enable_unlock(flags);
			clk_core_unprepare(parent);
		}
		return ret;
	}

	__clk_set_parent_after(core, parent, old_parent);

	return 0;
}
1205
/**
 * __clk_speculate_rates
 * @core: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications. Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold the prepare lock. No rates are actually changed here.
 */
static int __clk_speculate_rates(struct clk_core *core,
				 unsigned long parent_rate)
{
	struct clk_core *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	lockdep_assert_held(&prepare_lock);

	/* what this clk's rate would become if the parent changed */
	new_rate = clk_recalc(core, parent_rate);

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (core->notifier_count)
		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
				__func__, core->name, ret);
		goto out;
	}

	/* propagate the speculated rate down the subtree */
	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}
1250
/*
 * clk_calc_subtree - record a pending rate change for @core and its subtree
 *
 * Stores the rate and (optional) new parent that a pending rate change
 * will give @core, then recursively computes the prospective new rates of
 * all descendants (children keep their current parent, hence NULL / index
 * 0). Nothing is applied to hardware here.
 */
static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
			     struct clk_core *new_parent, u8 p_index)
{
	struct clk_core *child;

	core->new_rate = new_rate;
	core->new_parent = new_parent;
	core->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	core->new_child = NULL;
	if (new_parent && new_parent != core->parent)
		new_parent->new_child = core;

	hlist_for_each_entry(child, &core->children, child_node) {
		child->new_rate = clk_recalc(child, new_rate);
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}
1269
/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 *
 * Resolves @rate for @core via .determine_rate / .round_rate (or by
 * delegating upwards for CLK_SET_RATE_PARENT pass-through clks), records
 * the pending rates for the whole subtree with clk_calc_subtree(), and
 * returns the highest clk in the tree whose rate must change - or NULL if
 * the request cannot be satisfied.
 */
static struct clk_core *clk_calc_new_rates(struct clk_core *core,
					   unsigned long rate)
{
	struct clk_core *top = core;
	struct clk_core *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	unsigned long min_rate;
	unsigned long max_rate;
	int p_index = 0;
	long ret;

	/* sanity */
	if (IS_ERR_OR_NULL(core))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = core->parent;
	if (parent)
		best_parent_rate = parent->rate;

	clk_core_get_boundaries(core, &min_rate, &max_rate);

	/* find the closest rate and parent clk/rate */
	if (core->ops->determine_rate) {
		struct clk_rate_request req;

		/* seed the request with the target and current parent */
		req.rate = rate;
		req.min_rate = min_rate;
		req.max_rate = max_rate;
		if (parent) {
			req.best_parent_hw = parent->hw;
			req.best_parent_rate = parent->rate;
		} else {
			req.best_parent_hw = NULL;
			req.best_parent_rate = 0;
		}

		ret = core->ops->determine_rate(core->hw, &req);
		if (ret < 0)
			return NULL;

		/* .determine_rate may have chosen a different parent */
		best_parent_rate = req.best_parent_rate;
		new_rate = req.rate;
		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
	} else if (core->ops->round_rate) {
		ret = core->ops->round_rate(core->hw, rate,
					    &best_parent_rate);
		if (ret < 0)
			return NULL;

		/* .round_rate cannot reparent; enforce boundaries here */
		new_rate = ret;
		if (new_rate < min_rate || new_rate > max_rate)
			return NULL;
	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		core->new_rate = core->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, core->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent && core->num_parents > 1) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
			return NULL;
		}
	}

	/* if the parent's rate must change too, recurse upwards */
	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(core, new_rate, parent, p_index);

	return top;
}
1366
/*
 * Notify about rate changes in a subtree. Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 *
 * @event is PRE_RATE_CHANGE or ABORT_RATE_CHANGE. Returns the last clock
 * whose notifier vetoed the change (so the caller can report it), or NULL
 * if no notifier objected.
 */
static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
						  unsigned long event)
{
	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	/* this clock's rate is unchanged, so its subtree needs no notice */
	if (core->rate == core->new_rate)
		return NULL;

	if (core->notifier_count) {
		ret = __clk_notify(core, event, core->rate, core->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = core;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child) {
		tmp_clk = clk_propagate_rate_change(core->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}
1405
/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 *
 * Relies on clk_calc_new_rates() having filled in new_rate, new_parent
 * and new_parent_index for every clock in the subtree beforehand.
 */
static void clk_change_rate(struct clk_core *core)
{
	struct clk_core *child;
	struct hlist_node *tmp;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk_core *old_parent;

	old_rate = core->rate;

	/* program against the (possibly new) parent's current rate */
	if (core->new_parent)
		best_parent_rate = core->new_parent->rate;
	else if (core->parent)
		best_parent_rate = core->parent->rate;

	if (core->new_parent && core->new_parent != core->parent) {
		old_parent = __clk_set_parent_before(core, core->new_parent);
		trace_clk_set_parent(core, core->new_parent);

		if (core->ops->set_rate_and_parent) {
			/* combined op programs rate + parent in one step */
			skip_set_rate = true;
			core->ops->set_rate_and_parent(core->hw, core->new_rate,
					best_parent_rate,
					core->new_parent_index);
		} else if (core->ops->set_parent) {
			core->ops->set_parent(core->hw, core->new_parent_index);
		}

		trace_clk_set_parent_complete(core, core->new_parent);
		__clk_set_parent_after(core, core->new_parent, old_parent);
	}

	trace_clk_set_rate(core, core->new_rate);

	if (!skip_set_rate && core->ops->set_rate)
		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);

	trace_clk_set_rate_complete(core, core->new_rate);

	/* read back the rate the hardware actually settled on */
	core->rate = clk_recalc(core, best_parent_rate);

	if (core->notifier_count && old_rate != core->rate)
		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);

	/* some clocks ask for their subtree rates to be recomputed here */
	if (core->flags & CLK_RECALC_NEW_RATES)
		(void)clk_calc_new_rates(core, core->new_rate);

	/*
	 * Use safe iteration, as change_rate can actually swap parents
	 * for certain clock types.
	 */
	hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child)
		clk_change_rate(core->new_child);
}
1473
/*
 * Set @core's rate to @req_rate. Caller must hold the prepare lock.
 *
 * Computes the new rates for the affected subtree, lets notifiers veto
 * the change (rolling back with ABORT_RATE_CHANGE if they do), then
 * programs the hardware top-down. Returns 0 or a negative errno.
 */
static int clk_core_set_rate_nolock(struct clk_core *core,
				    unsigned long req_rate)
{
	struct clk_core *top, *fail_clk;
	unsigned long rate = req_rate;
	int ret = 0;

	if (!core)
		return 0;

	/* bail early if nothing to do */
	if (rate == clk_core_get_rate_nolock(core))
		return 0;

	/* CLK_SET_RATE_GATE forbids rate changes while the clk is prepared */
	if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
		return -EBUSY;

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(core, rate);
	if (!top)
		return -EINVAL;

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_debug("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		return -EBUSY;
	}

	/* change the rates */
	clk_change_rate(top);

	/* remember the consumer's request for later range re-evaluation */
	core->req_rate = req_rate;

	return ret;
}
1512
Mike Turquetteb24764902012-03-15 23:11:19 -07001513/**
1514 * clk_set_rate - specify a new rate for clk
1515 * @clk: the clk whose rate is being changed
1516 * @rate: the new rate for clk
1517 *
Mike Turquette5654dc92012-03-26 11:51:34 -07001518 * In the simplest case clk_set_rate will only adjust the rate of clk.
Mike Turquetteb24764902012-03-15 23:11:19 -07001519 *
Mike Turquette5654dc92012-03-26 11:51:34 -07001520 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
1521 * propagate up to clk's parent; whether or not this happens depends on the
1522 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
1523 * after calling .round_rate then upstream parent propagation is ignored. If
1524 * *parent_rate comes back with a new rate for clk's parent then we propagate
Peter Meerwald24ee1a02013-06-29 15:14:19 +02001525 * up to clk's parent and set its rate. Upward propagation will continue
Mike Turquette5654dc92012-03-26 11:51:34 -07001526 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
1527 * .round_rate stops requesting changes to clk's parent_rate.
Mike Turquetteb24764902012-03-15 23:11:19 -07001528 *
Mike Turquette5654dc92012-03-26 11:51:34 -07001529 * Rate changes are accomplished via tree traversal that also recalculates the
1530 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
Mike Turquetteb24764902012-03-15 23:11:19 -07001531 *
1532 * Returns 0 on success, -EERROR otherwise.
1533 */
1534int clk_set_rate(struct clk *clk, unsigned long rate)
1535{
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +01001536 int ret;
Mike Turquetteb24764902012-03-15 23:11:19 -07001537
Mike Turquette89ac8d72013-08-21 23:58:09 -07001538 if (!clk)
1539 return 0;
1540
Mike Turquetteb24764902012-03-15 23:11:19 -07001541 /* prevent racing with updates to the clock topology */
Mike Turquetteeab89f62013-03-28 13:59:01 -07001542 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001543
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +01001544 ret = clk_core_set_rate_nolock(clk->core, rate);
Mike Turquetteb24764902012-03-15 23:11:19 -07001545
Mike Turquetteeab89f62013-03-28 13:59:01 -07001546 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001547
1548 return ret;
1549}
1550EXPORT_SYMBOL_GPL(clk_set_rate);
1551
1552/**
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +01001553 * clk_set_rate_range - set a rate range for a clock source
1554 * @clk: clock source
1555 * @min: desired minimum clock rate in Hz, inclusive
1556 * @max: desired maximum clock rate in Hz, inclusive
1557 *
1558 * Returns success (0) or negative errno.
1559 */
1560int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
1561{
1562 int ret = 0;
1563
1564 if (!clk)
1565 return 0;
1566
1567 if (min > max) {
1568 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
1569 __func__, clk->core->name, clk->dev_id, clk->con_id,
1570 min, max);
1571 return -EINVAL;
1572 }
1573
1574 clk_prepare_lock();
1575
1576 if (min != clk->min_rate || max != clk->max_rate) {
1577 clk->min_rate = min;
1578 clk->max_rate = max;
1579 ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
1580 }
1581
1582 clk_prepare_unlock();
1583
1584 return ret;
1585}
1586EXPORT_SYMBOL_GPL(clk_set_rate_range);
1587
1588/**
1589 * clk_set_min_rate - set a minimum clock rate for a clock source
1590 * @clk: clock source
1591 * @rate: desired minimum clock rate in Hz, inclusive
1592 *
1593 * Returns success (0) or negative errno.
1594 */
1595int clk_set_min_rate(struct clk *clk, unsigned long rate)
1596{
1597 if (!clk)
1598 return 0;
1599
1600 return clk_set_rate_range(clk, rate, clk->max_rate);
1601}
1602EXPORT_SYMBOL_GPL(clk_set_min_rate);
1603
1604/**
1605 * clk_set_max_rate - set a maximum clock rate for a clock source
1606 * @clk: clock source
1607 * @rate: desired maximum clock rate in Hz, inclusive
1608 *
1609 * Returns success (0) or negative errno.
1610 */
1611int clk_set_max_rate(struct clk *clk, unsigned long rate)
1612{
1613 if (!clk)
1614 return 0;
1615
1616 return clk_set_rate_range(clk, clk->min_rate, rate);
1617}
1618EXPORT_SYMBOL_GPL(clk_set_max_rate);
1619
/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent. Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	/* hold the prepare lock so the topology cannot change under us */
	clk_prepare_lock();
	parent = __clk_get_parent(clk);
	clk_prepare_unlock();

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
1637
1638/*
1639 * .get_parent is mandatory for clocks with multiple possible parents. It is
1640 * optional for single-parent clocks. Always call .get_parent if it is
1641 * available and WARN if it is missing for multi-parent clocks.
1642 *
1643 * For single-parent clocks without .get_parent, first check to see if the
1644 * .parents array exists, and if so use it to avoid an expensive tree
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001645 * traversal. If .parents does not exist then walk the tree.
Mike Turquetteb24764902012-03-15 23:11:19 -07001646 */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001647static struct clk_core *__clk_init_parent(struct clk_core *core)
Mike Turquetteb24764902012-03-15 23:11:19 -07001648{
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001649 struct clk_core *ret = NULL;
Mike Turquetteb24764902012-03-15 23:11:19 -07001650 u8 index;
1651
1652 /* handle the trivial cases */
1653
Stephen Boydd6968fc2015-04-30 13:54:13 -07001654 if (!core->num_parents)
Mike Turquetteb24764902012-03-15 23:11:19 -07001655 goto out;
1656
Stephen Boydd6968fc2015-04-30 13:54:13 -07001657 if (core->num_parents == 1) {
1658 if (IS_ERR_OR_NULL(core->parent))
1659 core->parent = clk_core_lookup(core->parent_names[0]);
1660 ret = core->parent;
Mike Turquetteb24764902012-03-15 23:11:19 -07001661 goto out;
1662 }
1663
Stephen Boydd6968fc2015-04-30 13:54:13 -07001664 if (!core->ops->get_parent) {
1665 WARN(!core->ops->get_parent,
Mike Turquetteb24764902012-03-15 23:11:19 -07001666 "%s: multi-parent clocks must implement .get_parent\n",
1667 __func__);
1668 goto out;
1669 };
1670
1671 /*
Stephen Boydd6968fc2015-04-30 13:54:13 -07001672 * Do our best to cache parent clocks in core->parents. This prevents
1673 * unnecessary and expensive lookups. We don't set core->parent here;
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001674 * that is done by the calling function.
Mike Turquetteb24764902012-03-15 23:11:19 -07001675 */
1676
Stephen Boydd6968fc2015-04-30 13:54:13 -07001677 index = core->ops->get_parent(core->hw);
Mike Turquetteb24764902012-03-15 23:11:19 -07001678
Stephen Boydd6968fc2015-04-30 13:54:13 -07001679 if (!core->parents)
1680 core->parents =
1681 kcalloc(core->num_parents, sizeof(struct clk *),
Mike Turquetteb24764902012-03-15 23:11:19 -07001682 GFP_KERNEL);
1683
Stephen Boydd6968fc2015-04-30 13:54:13 -07001684 ret = clk_core_get_parent_by_index(core, index);
Mike Turquetteb24764902012-03-15 23:11:19 -07001685
1686out:
1687 return ret;
1688}
1689
/*
 * Move @core under @new_parent, then propagate the resulting accuracy
 * and rate changes down the subtree. Caller must hold the prepare lock.
 */
static void clk_core_reparent(struct clk_core *core,
				  struct clk_core *new_parent)
{
	clk_reparent(core, new_parent);
	__clk_recalc_accuracies(core);
	__clk_recalc_rates(core, POST_RATE_CHANGE);
}
1697
Tomeu Vizoso42c86542015-03-11 11:34:25 +01001698void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
1699{
1700 if (!hw)
1701 return;
1702
1703 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
1704}
1705
Mike Turquetteb24764902012-03-15 23:11:19 -07001706/**
Thierry Reding4e88f3d2015-01-21 17:13:00 +01001707 * clk_has_parent - check if a clock is a possible parent for another
1708 * @clk: clock source
1709 * @parent: parent clock source
Mike Turquetteb24764902012-03-15 23:11:19 -07001710 *
Thierry Reding4e88f3d2015-01-21 17:13:00 +01001711 * This function can be used in drivers that need to check that a clock can be
1712 * the parent of another without actually changing the parent.
Saravana Kannanf8aa0bd2013-05-15 21:07:24 -07001713 *
Thierry Reding4e88f3d2015-01-21 17:13:00 +01001714 * Returns true if @parent is a possible parent for @clk, false otherwise.
Mike Turquetteb24764902012-03-15 23:11:19 -07001715 */
Thierry Reding4e88f3d2015-01-21 17:13:00 +01001716bool clk_has_parent(struct clk *clk, struct clk *parent)
1717{
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001718 struct clk_core *core, *parent_core;
Thierry Reding4e88f3d2015-01-21 17:13:00 +01001719 unsigned int i;
1720
1721 /* NULL clocks should be nops, so return success if either is NULL. */
1722 if (!clk || !parent)
1723 return true;
1724
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001725 core = clk->core;
1726 parent_core = parent->core;
1727
Thierry Reding4e88f3d2015-01-21 17:13:00 +01001728 /* Optimize for the case where the parent is already the parent. */
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001729 if (core->parent == parent_core)
Thierry Reding4e88f3d2015-01-21 17:13:00 +01001730 return true;
1731
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001732 for (i = 0; i < core->num_parents; i++)
1733 if (strcmp(core->parent_names[i], parent_core->name) == 0)
Thierry Reding4e88f3d2015-01-21 17:13:00 +01001734 return true;
1735
1736 return false;
1737}
1738EXPORT_SYMBOL_GPL(clk_has_parent);
1739
/*
 * Switch @core's parent to @parent (NULL orphans the clock). Takes the
 * prepare lock, validates the request, lets rate-change notifiers veto
 * it, performs the re-parent and finally recalculates rates/accuracies
 * for the affected subtree. Returns 0 or a negative errno.
 */
static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	if (!core)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	/* nothing to do if this is already the parent */
	if (core->parent == parent)
		goto out;

	/* verify ops for multi-parent clks */
	if ((core->num_parents > 1) && (!core->ops->set_parent)) {
		ret = -ENOSYS;
		goto out;
	}

	/* check that we are allowed to re-parent if the clock is in use */
	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(core, parent);
		p_rate = parent->rate;
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
					__func__, parent->name, core->name);
			ret = p_index;
			goto out;
		}
	}

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(core, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto out;

	/* do the re-parent */
	ret = __clk_set_parent(core, parent, p_index);

	/* propagate rate and accuracy recalculation accordingly */
	if (ret) {
		/* the re-parent failed: roll the speculated rates back */
		__clk_recalc_rates(core, ABORT_RATE_CHANGE);
	} else {
		__clk_recalc_rates(core, POST_RATE_CHANGE);
		__clk_recalc_accuracies(core);
	}

out:
	clk_prepare_unlock();

	return ret;
}
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001802
1803/**
1804 * clk_set_parent - switch the parent of a mux clk
1805 * @clk: the mux clk whose input we are switching
1806 * @parent: the new input to clk
1807 *
1808 * Re-parent clk to use parent as its new input source. If clk is in
1809 * prepared state, the clk will get enabled for the duration of this call. If
1810 * that's not acceptable for a specific clk (Eg: the consumer can't handle
1811 * that, the reparenting is glitchy in hardware, etc), use the
1812 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
1813 *
1814 * After successfully changing clk's parent clk_set_parent will update the
1815 * clk topology, sysfs topology and propagate rate recalculation via
1816 * __clk_recalc_rates.
1817 *
1818 * Returns 0 on success, -EERROR otherwise.
1819 */
1820int clk_set_parent(struct clk *clk, struct clk *parent)
1821{
1822 if (!clk)
1823 return 0;
1824
1825 return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
1826}
Mike Turquetteb24764902012-03-15 23:11:19 -07001827EXPORT_SYMBOL_GPL(clk_set_parent);
1828
1829/**
Mike Turquettee59c5372014-02-18 21:21:25 -08001830 * clk_set_phase - adjust the phase shift of a clock signal
1831 * @clk: clock signal source
1832 * @degrees: number of degrees the signal is shifted
1833 *
1834 * Shifts the phase of a clock signal by the specified
1835 * degrees. Returns 0 on success, -EERROR otherwise.
1836 *
1837 * This function makes no distinction about the input or reference
1838 * signal that we adjust the clock signal phase against. For example
1839 * phase locked-loop clock signal generators we may shift phase with
1840 * respect to feedback clock signal input, but for other cases the
1841 * clock phase may be shifted with respect to some other, unspecified
1842 * signal.
1843 *
1844 * Additionally the concept of phase shift does not propagate through
1845 * the clock tree hierarchy, which sets it apart from clock rates and
1846 * clock accuracy. A parent clock phase attribute does not have an
1847 * impact on the phase attribute of a child clock.
1848 */
1849int clk_set_phase(struct clk *clk, int degrees)
1850{
Stephen Boyd08b95752015-02-02 14:09:43 -08001851 int ret = -EINVAL;
Mike Turquettee59c5372014-02-18 21:21:25 -08001852
1853 if (!clk)
Stephen Boyd08b95752015-02-02 14:09:43 -08001854 return 0;
Mike Turquettee59c5372014-02-18 21:21:25 -08001855
1856 /* sanity check degrees */
1857 degrees %= 360;
1858 if (degrees < 0)
1859 degrees += 360;
1860
1861 clk_prepare_lock();
1862
Stephen Boyddfc202e2015-02-02 14:37:41 -08001863 trace_clk_set_phase(clk->core, degrees);
1864
Stephen Boyd08b95752015-02-02 14:09:43 -08001865 if (clk->core->ops->set_phase)
1866 ret = clk->core->ops->set_phase(clk->core->hw, degrees);
Mike Turquettee59c5372014-02-18 21:21:25 -08001867
Stephen Boyddfc202e2015-02-02 14:37:41 -08001868 trace_clk_set_phase_complete(clk->core, degrees);
1869
Mike Turquettee59c5372014-02-18 21:21:25 -08001870 if (!ret)
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001871 clk->core->phase = degrees;
Mike Turquettee59c5372014-02-18 21:21:25 -08001872
Mike Turquettee59c5372014-02-18 21:21:25 -08001873 clk_prepare_unlock();
1874
Mike Turquettee59c5372014-02-18 21:21:25 -08001875 return ret;
1876}
Maxime Ripard9767b042015-01-20 22:23:43 +01001877EXPORT_SYMBOL_GPL(clk_set_phase);
Mike Turquettee59c5372014-02-18 21:21:25 -08001878
Stephen Boydd6968fc2015-04-30 13:54:13 -07001879static int clk_core_get_phase(struct clk_core *core)
Mike Turquettee59c5372014-02-18 21:21:25 -08001880{
Stephen Boyd1f3e1982015-04-30 14:21:56 -07001881 int ret;
Mike Turquettee59c5372014-02-18 21:21:25 -08001882
1883 clk_prepare_lock();
Stephen Boydd6968fc2015-04-30 13:54:13 -07001884 ret = core->phase;
Mike Turquettee59c5372014-02-18 21:21:25 -08001885 clk_prepare_unlock();
1886
Mike Turquettee59c5372014-02-18 21:21:25 -08001887 return ret;
1888}
1889
1890/**
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001891 * clk_get_phase - return the phase shift of a clock signal
1892 * @clk: clock signal source
1893 *
1894 * Returns the phase shift of a clock node in degrees, otherwise returns
1895 * -EERROR.
1896 */
1897int clk_get_phase(struct clk *clk)
1898{
1899 if (!clk)
1900 return 0;
1901
1902 return clk_core_get_phase(clk->core);
1903}
Stephen Boyd4dff95d2015-04-30 14:43:22 -07001904EXPORT_SYMBOL_GPL(clk_get_phase);
Mike Turquetteb24764902012-03-15 23:11:19 -07001905
1906/**
Michael Turquette3d3801e2015-02-25 09:11:01 -08001907 * clk_is_match - check if two clk's point to the same hardware clock
1908 * @p: clk compared against q
1909 * @q: clk compared against p
1910 *
1911 * Returns true if the two struct clk pointers both point to the same hardware
1912 * clock node. Put differently, returns true if struct clk *p and struct clk *q
1913 * share the same struct clk_core object.
1914 *
1915 * Returns false otherwise. Note that two NULL clks are treated as matching.
1916 */
1917bool clk_is_match(const struct clk *p, const struct clk *q)
1918{
1919 /* trivial case: identical struct clk's or both NULL */
1920 if (p == q)
1921 return true;
1922
1923 /* true if clk->core pointers match. Avoid derefing garbage */
1924 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
1925 if (p->core == q->core)
1926 return true;
1927
1928 return false;
1929}
1930EXPORT_SYMBOL_GPL(clk_is_match);
1931
Stephen Boyd4dff95d2015-04-30 14:43:22 -07001932/*** debugfs support ***/
1933
1934#ifdef CONFIG_DEBUG_FS
1935#include <linux/debugfs.h>
1936
/* root of the debugfs clk directory */
static struct dentry *rootdir;
/* set once clk_debug_init() has run; earlier registrations are deferred */
static int inited = 0;
static DEFINE_MUTEX(clk_debug_lock);
/* every registered clk_core, whether or not its dentry exists yet */
static HLIST_HEAD(clk_debug_list);

/* NULL-terminated array of all clock-tree roots, for clk_summary/clk_dump */
static struct hlist_head *all_lists[] = {
	&clk_root_list,
	&clk_orphan_list,
	NULL,
};

/* NULL-terminated array containing only the orphan clocks */
static struct hlist_head *orphan_list[] = {
	&clk_orphan_list,
	NULL,
};
1952
/* Print one formatted row of the clk_summary table for clock @c. */
static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
				 int level)
{
	if (!c)
		return;

	/* indent by 3 columns per tree level; keep the columns aligned */
	seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
		   level * 3 + 1, "",
		   30 - level * 3, c->name,
		   c->enable_count, c->prepare_count, clk_core_get_rate(c),
		   clk_core_get_accuracy(c), clk_core_get_phase(c));
}
1965
/* Recursively print @c and all of its descendants, one row each. */
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
				     int level)
{
	struct clk_core *child;

	if (!c)
		return;

	clk_summary_show_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node)
		clk_summary_show_subtree(s, child, level + 1);
}
1979
/* seq_file show callback for the debugfs "clk_summary" file */
static int clk_summary_show(struct seq_file *s, void *data)
{
	struct clk_core *c;
	/* private data is a NULL-terminated array of root hlists */
	struct hlist_head **lists = (struct hlist_head **)s->private;

	seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy   phase\n");
	seq_puts(s, "----------------------------------------------------------------------------------------\n");

	/* hold the prepare lock so the tree cannot change mid-dump */
	clk_prepare_lock();

	for (; *lists; lists++)
		hlist_for_each_entry(c, *lists, child_node)
			clk_summary_show_subtree(s, c, 0);

	clk_prepare_unlock();

	return 0;
}
1998
1999
/* open() hook: bind clk_summary_show to this file's seq_file */
static int clk_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_summary_show, inode->i_private);
}
2004
/* file operations for the debugfs "clk_summary" file (read-only seq_file) */
static const struct file_operations clk_summary_fops = {
	.open		= clk_summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2011
/*
 * Emit one clock as an unterminated JSON object; the caller
 * (clk_dump_subtree) appends the children and the closing brace.
 * @level is unused here; kept for symmetry with clk_summary_show_one().
 */
static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
{
	if (!c)
		return;

	/* This should be JSON format, i.e. elements separated with a comma */
	seq_printf(s, "\"%s\": { ", c->name);
	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
	seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
}
2025
2026static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
2027{
2028 struct clk_core *child;
2029
2030 if (!c)
2031 return;
2032
2033 clk_dump_one(s, c, level);
2034
2035 hlist_for_each_entry(child, &c->children, child_node) {
2036 seq_printf(s, ",");
2037 clk_dump_subtree(s, child, level + 1);
2038 }
2039
2040 seq_printf(s, "}");
2041}
2042
2043static int clk_dump(struct seq_file *s, void *data)
2044{
2045 struct clk_core *c;
2046 bool first_node = true;
2047 struct hlist_head **lists = (struct hlist_head **)s->private;
2048
2049 seq_printf(s, "{");
2050
2051 clk_prepare_lock();
2052
2053 for (; *lists; lists++) {
2054 hlist_for_each_entry(c, *lists, child_node) {
2055 if (!first_node)
2056 seq_puts(s, ",");
2057 first_node = false;
2058 clk_dump_subtree(s, c, 0);
2059 }
2060 }
2061
2062 clk_prepare_unlock();
2063
Felipe Balbi70e9f4d2015-05-01 09:48:37 -05002064 seq_puts(s, "}\n");
Stephen Boyd4dff95d2015-04-30 14:43:22 -07002065 return 0;
2066}
2067
2068
/* open() hook: bind clk_dump to this file's seq_file */
static int clk_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dump, inode->i_private);
}
2073
/* file operations for the debugfs "clk_dump" file (read-only seq_file) */
static const struct file_operations clk_dump_fops = {
	.open		= clk_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2080
/*
 * Create the per-clock debugfs directory under @pdentry and populate it
 * with the standard attribute files. On any failure everything created
 * so far is torn down again and a negative errno is returned.
 *
 * NOTE(review): the (u32 *) casts below expose only 32 bits of fields
 * that are wider than u32 (rate, accuracy, flags are unsigned long);
 * on a 64-bit big-endian machine this reads the wrong half — confirm
 * whether that matters for the supported configurations.
 */
static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
{
	struct dentry *d;
	int ret = -ENOMEM;

	if (!core || !pdentry) {
		ret = -EINVAL;
		goto out;
	}

	d = debugfs_create_dir(core->name, pdentry);
	if (!d)
		goto out;

	core->dentry = d;

	d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry,
			(u32 *)&core->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry,
			(u32 *)&core->accuracy);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry,
			(u32 *)&core->phase);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry,
			(u32 *)&core->flags);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry,
			(u32 *)&core->prepare_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry,
			(u32 *)&core->enable_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry,
			(u32 *)&core->notifier_count);
	if (!d)
		goto err_out;

	/* let the provider add its own driver-specific debugfs entries */
	if (core->ops->debug_init) {
		ret = core->ops->debug_init(core->hw, core->dentry);
		if (ret)
			goto err_out;
	}

	ret = 0;
	goto out;

err_out:
	/* remove the whole directory so a half-populated one never leaks */
	debugfs_remove_recursive(core->dentry);
	core->dentry = NULL;
out:
	return ret;
}
2147
2148/**
Stephen Boyd6e5ab412015-04-30 15:11:31 -07002149 * clk_debug_register - add a clk node to the debugfs clk directory
2150 * @core: the clk being added to the debugfs clk directory
Stephen Boyd4dff95d2015-04-30 14:43:22 -07002151 *
Stephen Boyd6e5ab412015-04-30 15:11:31 -07002152 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
2153 * initialized. Otherwise it bails out early since the debugfs clk directory
Stephen Boyd4dff95d2015-04-30 14:43:22 -07002154 * will be created lazily by clk_debug_init as part of a late_initcall.
2155 */
2156static int clk_debug_register(struct clk_core *core)
2157{
2158 int ret = 0;
2159
2160 mutex_lock(&clk_debug_lock);
2161 hlist_add_head(&core->debug_node, &clk_debug_list);
2162
2163 if (!inited)
2164 goto unlock;
2165
2166 ret = clk_debug_create_one(core, rootdir);
2167unlock:
2168 mutex_unlock(&clk_debug_lock);
2169
2170 return ret;
2171}
2172
/**
 * clk_debug_unregister - remove a clk node from the debugfs clk directory
 * @core: the clk being removed from the debugfs clk directory
 *
 * Dynamically removes a clk and all its child nodes from the
 * debugfs clk directory if clk->dentry points to debugfs created by
 * clk_debug_register in __clk_init.
 */
static void clk_debug_unregister(struct clk_core *core)
{
	/*
	 * Both the list removal and the dentry teardown happen under
	 * clk_debug_lock, mirroring clk_debug_register()/clk_debug_init().
	 */
	mutex_lock(&clk_debug_lock);
	hlist_del_init(&core->debug_node);
	debugfs_remove_recursive(core->dentry);
	core->dentry = NULL;
	mutex_unlock(&clk_debug_lock);
}
2189
2190struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
2191 void *data, const struct file_operations *fops)
2192{
2193 struct dentry *d = NULL;
2194
2195 if (hw->core->dentry)
2196 d = debugfs_create_file(name, mode, hw->core->dentry, data,
2197 fops);
2198
2199 return d;
2200}
2201EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
2202
2203/**
Stephen Boyd6e5ab412015-04-30 15:11:31 -07002204 * clk_debug_init - lazily populate the debugfs clk directory
Stephen Boyd4dff95d2015-04-30 14:43:22 -07002205 *
Stephen Boyd6e5ab412015-04-30 15:11:31 -07002206 * clks are often initialized very early during boot before memory can be
2207 * dynamically allocated and well before debugfs is setup. This function
2208 * populates the debugfs clk directory once at boot-time when we know that
2209 * debugfs is setup. It should only be called once at boot-time, all other clks
2210 * added dynamically will be done so with clk_debug_register.
Stephen Boyd4dff95d2015-04-30 14:43:22 -07002211 */
2212static int __init clk_debug_init(void)
2213{
2214 struct clk_core *core;
2215 struct dentry *d;
2216
2217 rootdir = debugfs_create_dir("clk", NULL);
2218
2219 if (!rootdir)
2220 return -ENOMEM;
2221
2222 d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
2223 &clk_summary_fops);
2224 if (!d)
2225 return -ENOMEM;
2226
2227 d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
2228 &clk_dump_fops);
2229 if (!d)
2230 return -ENOMEM;
2231
2232 d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
2233 &orphan_list, &clk_summary_fops);
2234 if (!d)
2235 return -ENOMEM;
2236
2237 d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
2238 &orphan_list, &clk_dump_fops);
2239 if (!d)
2240 return -ENOMEM;
2241
2242 mutex_lock(&clk_debug_lock);
2243 hlist_for_each_entry(core, &clk_debug_list, debug_node)
2244 clk_debug_create_one(core, rootdir);
2245
2246 inited = 1;
2247 mutex_unlock(&clk_debug_lock);
2248
2249 return 0;
2250}
2251late_initcall(clk_debug_init);
#else
/* Debugfs disabled: no-op stand-ins so callers need no #ifdef guards. */
static inline int clk_debug_register(struct clk_core *core) { return 0; }
static inline void clk_debug_reparent(struct clk_core *core,
				      struct clk_core *new_parent)
{
}
static inline void clk_debug_unregister(struct clk_core *core)
{
}
#endif
2262
/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev: device initializing this clk, placeholder for now
 * @clk_user: clk being initialized
 *
 * Initializes the lists in struct clk_core, queries the hardware for the
 * parent and rate and sets them both.
 *
 * Called with no locks held; takes and releases the prepare lock itself.
 * Returns 0 on success, -EINVAL on a bad argument or inconsistent clk_ops,
 * -EEXIST if a clock with the same name is already registered.
 */
static int __clk_init(struct device *dev, struct clk *clk_user)
{
	int i, ret = 0;
	struct clk_core *orphan;
	struct hlist_node *tmp2;
	struct clk_core *core;
	unsigned long rate;

	if (!clk_user)
		return -EINVAL;

	core = clk_user->core;

	clk_prepare_lock();

	/* check to see if a clock with this name is already registered */
	if (clk_core_lookup(core->name)) {
		pr_debug("%s: clk %s already initialized\n",
				__func__, core->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane.  See Documentation/clk.txt */
	if (core->ops->set_rate &&
	    !((core->ops->round_rate || core->ops->determine_rate) &&
	      core->ops->recalc_rate)) {
		pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
				__func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->ops->set_parent && !core->ops->get_parent) {
		pr_warning("%s: %s must implement .get_parent & .set_parent\n",
				__func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->ops->set_rate_and_parent &&
			!(core->ops->set_parent && core->ops->set_rate)) {
		pr_warn("%s: %s must implement .set_parent & .set_rate\n",
				__func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < core->num_parents; i++)
		WARN(!core->parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, core->name);

	/*
	 * Allocate an array of struct clk *'s to avoid unnecessary string
	 * look-ups of clk's possible parents. This can fail for clocks passed
	 * in to clk_init during early boot; thus any access to core->parents[]
	 * must always check for a NULL pointer and try to populate it if
	 * necessary.
	 *
	 * If core->parents is not NULL we skip this entire block. This allows
	 * for clock drivers to statically initialize core->parents.
	 */
	if (core->num_parents > 1 && !core->parents) {
		core->parents = kcalloc(core->num_parents, sizeof(struct clk *),
					GFP_KERNEL);
		/*
		 * clk_core_lookup returns NULL for parents that have not been
		 * clk_init'd; thus any access to clk->parents[] must check
		 * for a NULL pointer. We can always perform lazy lookups for
		 * missing parents later on.
		 */
		if (core->parents)
			for (i = 0; i < core->num_parents; i++)
				core->parents[i] =
					clk_core_lookup(core->parent_names[i]);
	}

	core->parent = __clk_init_parent(core);

	/*
	 * Populate core->parent if parent has already been __clk_init'd. If
	 * parent has not yet been __clk_init'd then place clk in the orphan
	 * list. If clk has set the CLK_IS_ROOT flag then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (core->parent)
		hlist_add_head(&core->child_node,
				&core->parent->children);
	else if (core->flags & CLK_IS_ROOT)
		hlist_add_head(&core->child_node, &clk_root_list);
	else
		hlist_add_head(&core->child_node, &clk_orphan_list);

	/*
	 * Set clk's accuracy. The preferred method is to use
	 * .recalc_accuracy. For simple clocks and lazy developers the default
	 * fallback is to use the parent's accuracy. If a clock doesn't have a
	 * parent (or is orphaned) then accuracy is set to zero (perfect
	 * clock).
	 */
	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
					__clk_get_accuracy(core->parent));
	else if (core->parent)
		core->accuracy = core->parent->accuracy;
	else
		core->accuracy = 0;

	/*
	 * Set clk's phase.
	 * Since a phase is by definition relative to its parent, just
	 * query the current clock phase, or just assume it's in phase.
	 */
	if (core->ops->get_phase)
		core->phase = core->ops->get_phase(core->hw);
	else
		core->phase = 0;

	/*
	 * Set clk's rate. The preferred method is to use .recalc_rate. For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate. If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (core->ops->recalc_rate)
		rate = core->ops->recalc_rate(core->hw,
				clk_core_get_rate_nolock(core->parent));
	else if (core->parent)
		rate = core->parent->rate;
	else
		rate = 0;
	core->rate = core->req_rate = rate;

	/*
	 * walk the list of orphan clocks and reparent any that are children of
	 * this clock
	 */
	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
		/* prefer the orphan's own notion of its parent index */
		if (orphan->num_parents && orphan->ops->get_parent) {
			i = orphan->ops->get_parent(orphan->hw);
			if (!strcmp(core->name, orphan->parent_names[i]))
				clk_core_reparent(orphan, core);
			continue;
		}

		/* otherwise match this clock against any candidate name */
		for (i = 0; i < orphan->num_parents; i++)
			if (!strcmp(core->name, orphan->parent_names[i])) {
				clk_core_reparent(orphan, core);
				break;
			}
	 }

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 */
	if (core->ops->init)
		core->ops->init(core->hw);

	kref_init(&core->ref);
out:
	clk_prepare_unlock();

	/* expose the new clock in debugfs only after a fully successful init */
	if (!ret)
		clk_debug_register(core);

	return ret;
}
2449
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002450struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
2451 const char *con_id)
Saravana Kannan0197b3e2012-04-25 22:58:56 -07002452{
Saravana Kannan0197b3e2012-04-25 22:58:56 -07002453 struct clk *clk;
2454
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002455 /* This is to allow this function to be chained to others */
2456 if (!hw || IS_ERR(hw))
2457 return (struct clk *) hw;
Saravana Kannan0197b3e2012-04-25 22:58:56 -07002458
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002459 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
2460 if (!clk)
2461 return ERR_PTR(-ENOMEM);
2462
2463 clk->core = hw->core;
2464 clk->dev_id = dev_id;
2465 clk->con_id = con_id;
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +01002466 clk->max_rate = ULONG_MAX;
2467
2468 clk_prepare_lock();
Stephen Boyd50595f82015-02-06 11:42:44 -08002469 hlist_add_head(&clk->clks_node, &hw->core->clks);
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +01002470 clk_prepare_unlock();
Saravana Kannan0197b3e2012-04-25 22:58:56 -07002471
2472 return clk;
2473}
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002474
/*
 * __clk_free_clk - free a per-consumer handle created by __clk_create_clk()
 *
 * Unlinks the handle from its core's consumer list under the prepare lock
 * and frees it.  Note: does not drop any reference on the clk_core itself.
 */
void __clk_free_clk(struct clk *clk)
{
	clk_prepare_lock();
	hlist_del(&clk->clks_node);
	clk_prepare_unlock();

	kfree(clk);
}
Saravana Kannan0197b3e2012-04-25 22:58:56 -07002483
/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with new
 * clock nodes. It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
 * rest of the clock API. In the event of an error clk_register will return an
 * error code; drivers must test for an error code after calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	int i, ret;
	struct clk_core *core;

	core = kzalloc(sizeof(*core), GFP_KERNEL);
	if (!core) {
		ret = -ENOMEM;
		goto fail_out;
	}

	/* kstrdup_const avoids a copy when the name lives in .rodata */
	core->name = kstrdup_const(hw->init->name, GFP_KERNEL);
	if (!core->name) {
		ret = -ENOMEM;
		goto fail_name;
	}
	core->ops = hw->init->ops;
	if (dev && dev->driver)
		core->owner = dev->driver->owner;
	core->hw = hw;
	core->flags = hw->init->flags;
	core->num_parents = hw->init->num_parents;
	core->min_rate = 0;
	core->max_rate = ULONG_MAX;
	hw->core = core;

	/* allocate local copy in case parent_names is __initdata */
	core->parent_names = kcalloc(core->num_parents, sizeof(char *),
					GFP_KERNEL);

	if (!core->parent_names) {
		ret = -ENOMEM;
		goto fail_parent_names;
	}


	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < core->num_parents; i++) {
		core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
						GFP_KERNEL);
		if (!core->parent_names[i]) {
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	INIT_HLIST_HEAD(&core->clks);

	/* the provider's own consumer handle for this clock */
	hw->clk = __clk_create_clk(hw, NULL, NULL);
	if (IS_ERR(hw->clk)) {
		ret = PTR_ERR(hw->clk);
		goto fail_parent_names_copy;
	}

	ret = __clk_init(dev, hw->clk);
	if (!ret)
		return hw->clk;

	__clk_free_clk(hw->clk);
	hw->clk = NULL;

	/* unwind in reverse order; i indexes the first name NOT yet duped */
fail_parent_names_copy:
	while (--i >= 0)
		kfree_const(core->parent_names[i]);
	kfree(core->parent_names);
fail_parent_names:
	kfree_const(core->name);
fail_name:
	kfree(core);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
2568
Stephen Boyd6e5ab412015-04-30 15:11:31 -07002569/* Free memory allocated for a clock. */
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02002570static void __clk_release(struct kref *ref)
2571{
Stephen Boydd6968fc2015-04-30 13:54:13 -07002572 struct clk_core *core = container_of(ref, struct clk_core, ref);
2573 int i = core->num_parents;
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02002574
Krzysztof Kozlowski496eadf2015-01-09 09:28:10 +01002575 lockdep_assert_held(&prepare_lock);
2576
Stephen Boydd6968fc2015-04-30 13:54:13 -07002577 kfree(core->parents);
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02002578 while (--i >= 0)
Stephen Boydd6968fc2015-04-30 13:54:13 -07002579 kfree_const(core->parent_names[i]);
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02002580
Stephen Boydd6968fc2015-04-30 13:54:13 -07002581 kfree(core->parent_names);
2582 kfree_const(core->name);
2583 kfree(core);
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02002584}
2585
/*
 * Empty clk_ops for unregistered clocks. These are used temporarily
 * after clk_unregister() was called on a clock and until last clock
 * consumer calls clk_put() and the struct clk object is freed.
 */
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
	return -ENXIO;
}

static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
	/* disabling an already-unregistered clock is a consumer bug */
	WARN_ON_ONCE(1);
}

static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long parent_rate)
{
	return -ENXIO;
}

static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
	return -ENXIO;
}

static const struct clk_ops clk_nodrv_ops = {
	.enable = clk_nodrv_prepare_enable,
	.disable = clk_nodrv_disable_unprepare,
	.prepare = clk_nodrv_prepare_enable,
	.unprepare = clk_nodrv_disable_unprepare,
	.set_rate = clk_nodrv_set_rate,
	.set_parent = clk_nodrv_set_parent,
};
2620
Mark Brown1df5c932012-04-18 09:07:12 +01002621/**
2622 * clk_unregister - unregister a currently registered clock
2623 * @clk: clock to unregister
Mark Brown1df5c932012-04-18 09:07:12 +01002624 */
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02002625void clk_unregister(struct clk *clk)
2626{
2627 unsigned long flags;
2628
Stephen Boyd6314b672014-09-04 23:37:49 -07002629 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
2630 return;
2631
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002632 clk_debug_unregister(clk->core);
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02002633
2634 clk_prepare_lock();
2635
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002636 if (clk->core->ops == &clk_nodrv_ops) {
2637 pr_err("%s: unregistered clock: %s\n", __func__,
2638 clk->core->name);
Stephen Boyd6314b672014-09-04 23:37:49 -07002639 return;
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02002640 }
2641 /*
2642 * Assign empty clock ops for consumers that might still hold
2643 * a reference to this clock.
2644 */
2645 flags = clk_enable_lock();
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002646 clk->core->ops = &clk_nodrv_ops;
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02002647 clk_enable_unlock(flags);
2648
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002649 if (!hlist_empty(&clk->core->children)) {
2650 struct clk_core *child;
Stephen Boyd874f2242014-04-18 16:29:43 -07002651 struct hlist_node *t;
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02002652
2653 /* Reparent all children to the orphan list. */
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002654 hlist_for_each_entry_safe(child, t, &clk->core->children,
2655 child_node)
2656 clk_core_set_parent(child, NULL);
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02002657 }
2658
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002659 hlist_del_init(&clk->core->child_node);
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02002660
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002661 if (clk->core->prepare_count)
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02002662 pr_warn("%s: unregistering prepared clock: %s\n",
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002663 __func__, clk->core->name);
2664 kref_put(&clk->core->ref, __clk_release);
Stephen Boyd6314b672014-09-04 23:37:49 -07002665
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02002666 clk_prepare_unlock();
2667}
Mark Brown1df5c932012-04-18 09:07:12 +01002668EXPORT_SYMBOL_GPL(clk_unregister);
2669
/* devres destructor: unregister the clk stored in the devres block */
static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(*(struct clk **)res);
}
2674
2675/**
2676 * devm_clk_register - resource managed clk_register()
2677 * @dev: device that is registering this clock
2678 * @hw: link to hardware-specific clock data
2679 *
2680 * Managed clk_register(). Clocks returned from this function are
2681 * automatically clk_unregister()ed on driver detach. See clk_register() for
2682 * more information.
2683 */
2684struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
2685{
2686 struct clk *clk;
Stephen Boyd293ba3b2014-04-18 16:29:42 -07002687 struct clk **clkp;
Stephen Boyd46c87732012-09-24 13:38:04 -07002688
Stephen Boyd293ba3b2014-04-18 16:29:42 -07002689 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
2690 if (!clkp)
Stephen Boyd46c87732012-09-24 13:38:04 -07002691 return ERR_PTR(-ENOMEM);
2692
Stephen Boyd293ba3b2014-04-18 16:29:42 -07002693 clk = clk_register(dev, hw);
2694 if (!IS_ERR(clk)) {
2695 *clkp = clk;
2696 devres_add(dev, clkp);
Stephen Boyd46c87732012-09-24 13:38:04 -07002697 } else {
Stephen Boyd293ba3b2014-04-18 16:29:42 -07002698 devres_free(clkp);
Stephen Boyd46c87732012-09-24 13:38:04 -07002699 }
2700
2701 return clk;
2702}
2703EXPORT_SYMBOL_GPL(devm_clk_register);
2704
/*
 * devres match callback for devm_clk_unregister(): true when the devres
 * block holds the clk being searched for.
 */
static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk *c = res;
	if (WARN_ON(!c))
		return 0;
	return c == data;
}
2712
/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device the clock was devm_clk_register()ed against
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	/* devres_release() runs devm_clk_release() -> clk_unregister() */
	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);
2726
Sylwester Nawrockiac2df522013-08-24 20:10:41 +02002727/*
2728 * clkdev helpers
2729 */
2730int __clk_get(struct clk *clk)
2731{
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002732 struct clk_core *core = !clk ? NULL : clk->core;
2733
2734 if (core) {
2735 if (!try_module_get(core->owner))
Sylwester Nawrocki00efcb12014-01-07 13:03:43 +01002736 return 0;
Sylwester Nawrockiac2df522013-08-24 20:10:41 +02002737
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002738 kref_get(&core->ref);
Sylwester Nawrocki00efcb12014-01-07 13:03:43 +01002739 }
Sylwester Nawrockiac2df522013-08-24 20:10:41 +02002740 return 1;
2741}
2742
/*
 * __clk_put - drop a consumer's handle and its reference on the clk_core
 *
 * Counterpart of __clk_get()/__clk_create_clk(): removes the handle from
 * the core's consumer list, re-applies the requested rate if this handle's
 * min/max constraint was the one clamping it, drops the core reference and
 * the module pin, and frees the handle.
 */
void __clk_put(struct clk *clk)
{
	struct module *owner;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();

	hlist_del(&clk->clks_node);
	/* this handle's bounds excluded req_rate: re-set it without them */
	if (clk->min_rate > clk->core->req_rate ||
	    clk->max_rate < clk->core->req_rate)
		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);

	/* cache owner first: kref_put() may free clk->core */
	owner = clk->core->owner;
	kref_put(&clk->core->ref, __clk_release);

	clk_prepare_unlock();

	module_put(owner);

	kfree(clk);
}
2766
Mike Turquetteb24764902012-03-15 23:11:19 -07002767/*** clk rate change notifiers ***/
2768
2769/**
2770 * clk_notifier_register - add a clk rate change notifier
2771 * @clk: struct clk * to watch
2772 * @nb: struct notifier_block * with callback info
2773 *
2774 * Request notification when clk's rate changes. This uses an SRCU
2775 * notifier because we want it to block and notifier unregistrations are
2776 * uncommon. The callbacks associated with the notifier must not
2777 * re-enter into the clk framework by calling any top-level clk APIs;
2778 * this will cause a nested prepare_lock mutex.
2779 *
Soren Brinkmann5324fda2014-01-22 11:48:37 -08002780 * In all notification cases cases (pre, post and abort rate change) the
2781 * original clock rate is passed to the callback via struct
2782 * clk_notifier_data.old_rate and the new frequency is passed via struct
Mike Turquetteb24764902012-03-15 23:11:19 -07002783 * clk_notifier_data.new_rate.
2784 *
Mike Turquetteb24764902012-03-15 23:11:19 -07002785 * clk_notifier_register() must be called from non-atomic context.
2786 * Returns -EINVAL if called with null arguments, -ENOMEM upon
2787 * allocation failure; otherwise, passes along the return value of
2788 * srcu_notifier_chain_register().
2789 */
2790int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
2791{
2792 struct clk_notifier *cn;
2793 int ret = -ENOMEM;
2794
2795 if (!clk || !nb)
2796 return -EINVAL;
2797
Mike Turquetteeab89f62013-03-28 13:59:01 -07002798 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07002799
2800 /* search the list of notifiers for this clk */
2801 list_for_each_entry(cn, &clk_notifier_list, node)
2802 if (cn->clk == clk)
2803 break;
2804
2805 /* if clk wasn't in the notifier list, allocate new clk_notifier */
2806 if (cn->clk != clk) {
2807 cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
2808 if (!cn)
2809 goto out;
2810
2811 cn->clk = clk;
2812 srcu_init_notifier_head(&cn->notifier_head);
2813
2814 list_add(&cn->node, &clk_notifier_list);
2815 }
2816
2817 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
2818
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002819 clk->core->notifier_count++;
Mike Turquetteb24764902012-03-15 23:11:19 -07002820
2821out:
Mike Turquetteeab89f62013-03-28 13:59:01 -07002822 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07002823
2824 return ret;
2825}
2826EXPORT_SYMBOL_GPL(clk_notifier_register);
2827
2828/**
2829 * clk_notifier_unregister - remove a clk rate change notifier
2830 * @clk: struct clk *
2831 * @nb: struct notifier_block * with callback info
2832 *
2833 * Request no further notification for changes to 'clk' and frees memory
2834 * allocated in clk_notifier_register.
2835 *
2836 * Returns -EINVAL if called with null arguments; otherwise, passes
2837 * along the return value of srcu_notifier_chain_unregister().
2838 */
2839int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
2840{
2841 struct clk_notifier *cn = NULL;
2842 int ret = -EINVAL;
2843
2844 if (!clk || !nb)
2845 return -EINVAL;
2846
Mike Turquetteeab89f62013-03-28 13:59:01 -07002847 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07002848
2849 list_for_each_entry(cn, &clk_notifier_list, node)
2850 if (cn->clk == clk)
2851 break;
2852
2853 if (cn->clk == clk) {
2854 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
2855
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002856 clk->core->notifier_count--;
Mike Turquetteb24764902012-03-15 23:11:19 -07002857
2858 /* XXX the notifier code should handle this better */
2859 if (!cn->notifier_head.head) {
2860 srcu_cleanup_notifier_head(&cn->notifier_head);
Lai Jiangshan72b53222013-06-03 17:17:15 +08002861 list_del(&cn->node);
Mike Turquetteb24764902012-03-15 23:11:19 -07002862 kfree(cn);
2863 }
2864
2865 } else {
2866 ret = -ENOENT;
2867 }
2868
Mike Turquetteeab89f62013-03-28 13:59:01 -07002869 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07002870
2871 return ret;
2872}
2873EXPORT_SYMBOL_GPL(clk_notifier_unregister);
Grant Likely766e6a42012-04-09 14:50:06 -05002874
2875#ifdef CONFIG_OF
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers; additions and removals
 *        are serialized by of_clk_mutex
 * @node: Pointer to device tree node of clock provider (holds a reference
 *        taken by of_clk_add_provider())
 * @get: Get clock callback.  Returns NULL or a struct clk for the
 *       given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	void *data;
};
2891
/*
 * Zero-filled sentinel placed in the __clk_of_table_end section so the
 * linker-assembled __clk_of_table (walked by of_clk_init() when no
 * explicit match table is passed) is properly terminated.
 */
static const struct of_device_id __clk_of_table_sentinel
	__used __section(__clk_of_table_end);

/* Registered DT clock providers; guarded by of_clk_mutex. */
static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);
2897
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
		void *data)
{
	/*
	 * The provider registered exactly one clock as its context
	 * pointer; the specifier carries nothing further to decode.
	 */
	struct clk *clk = data;

	return clk;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
2904
Shawn Guo494bfec2012-08-22 21:36:27 +08002905struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
2906{
2907 struct clk_onecell_data *clk_data = data;
2908 unsigned int idx = clkspec->args[0];
2909
2910 if (idx >= clk_data->clk_num) {
2911 pr_err("%s: invalid clock index %d\n", __func__, idx);
2912 return ERR_PTR(-EINVAL);
2913 }
2914
2915 return clk_data->clks[idx];
2916}
2917EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
2918
Grant Likely766e6a42012-04-09 14:50:06 -05002919/**
2920 * of_clk_add_provider() - Register a clock provider for a node
2921 * @np: Device node pointer associated with clock provider
2922 * @clk_src_get: callback for decoding clock
2923 * @data: context pointer for @clk_src_get callback.
2924 */
2925int of_clk_add_provider(struct device_node *np,
2926 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
2927 void *data),
2928 void *data)
2929{
2930 struct of_clk_provider *cp;
Sylwester Nawrocki86be4082014-06-18 17:29:32 +02002931 int ret;
Grant Likely766e6a42012-04-09 14:50:06 -05002932
2933 cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
2934 if (!cp)
2935 return -ENOMEM;
2936
2937 cp->node = of_node_get(np);
2938 cp->data = data;
2939 cp->get = clk_src_get;
2940
Sylwester Nawrockid6782c22013-08-23 17:03:43 +02002941 mutex_lock(&of_clk_mutex);
Grant Likely766e6a42012-04-09 14:50:06 -05002942 list_add(&cp->link, &of_clk_providers);
Sylwester Nawrockid6782c22013-08-23 17:03:43 +02002943 mutex_unlock(&of_clk_mutex);
Grant Likely766e6a42012-04-09 14:50:06 -05002944 pr_debug("Added clock from %s\n", np->full_name);
2945
Sylwester Nawrocki86be4082014-06-18 17:29:32 +02002946 ret = of_clk_set_defaults(np, true);
2947 if (ret < 0)
2948 of_clk_del_provider(np);
2949
2950 return ret;
Grant Likely766e6a42012-04-09 14:50:06 -05002951}
2952EXPORT_SYMBOL_GPL(of_clk_add_provider);
2953
2954/**
2955 * of_clk_del_provider() - Remove a previously registered clock provider
2956 * @np: Device node pointer associated with clock provider
2957 */
2958void of_clk_del_provider(struct device_node *np)
2959{
2960 struct of_clk_provider *cp;
2961
Sylwester Nawrockid6782c22013-08-23 17:03:43 +02002962 mutex_lock(&of_clk_mutex);
Grant Likely766e6a42012-04-09 14:50:06 -05002963 list_for_each_entry(cp, &of_clk_providers, link) {
2964 if (cp->node == np) {
2965 list_del(&cp->link);
2966 of_node_put(cp->node);
2967 kfree(cp);
2968 break;
2969 }
2970 }
Sylwester Nawrockid6782c22013-08-23 17:03:43 +02002971 mutex_unlock(&of_clk_mutex);
Grant Likely766e6a42012-04-09 14:50:06 -05002972}
2973EXPORT_SYMBOL_GPL(of_clk_del_provider);
2974
/*
 * __of_clk_get_from_provider() - look up a clock from the registered
 * providers and wrap it in a per-user handle
 * @clkspec: clock specifier (provider node plus argument cells)
 * @dev_id: device identifier recorded in the per-user struct clk
 * @con_id: connection identifier recorded in the per-user struct clk
 *
 * Walks the provider list under of_clk_mutex; the first provider whose
 * node matches and whose ->get() returns a valid clock wins.  The raw
 * clock is wrapped via __clk_create_clk() and referenced with
 * __clk_get().  Returns ERR_PTR(-EPROBE_DEFER) when no registered
 * provider resolves @clkspec, ERR_PTR(-EINVAL) for a NULL @clkspec.
 */
struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
				       const char *dev_id, const char *con_id)
{
	struct of_clk_provider *provider;
	struct clk *clk = ERR_PTR(-EPROBE_DEFER);

	if (!clkspec)
		return ERR_PTR(-EINVAL);

	/* Check if we have such a provider in our array */
	mutex_lock(&of_clk_mutex);
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np)
			clk = provider->get(clkspec, provider->data);
		/*
		 * Deliberately outside the node-match branch: the loop
		 * stops as soon as any iteration produced a valid clk.
		 */
		if (!IS_ERR(clk)) {
			/* wrap the raw clk in a per-user handle */
			clk = __clk_create_clk(__clk_get_hw(clk), dev_id,
					       con_id);

			/* take a reference; free the handle on failure */
			if (!IS_ERR(clk) && !__clk_get(clk)) {
				__clk_free_clk(clk);
				clk = ERR_PTR(-ENOENT);
			}

			break;
		}
	}
	mutex_unlock(&of_clk_mutex);

	return clk;
}
3005
/**
 * of_clk_get_from_provider() - Lookup a clock from a clock provider
 * @clkspec: pointer to a clock specifier data structure
 *
 * This function looks up a struct clk from the registered list of clock
 * providers; the input is a clock specifier data structure as returned
 * from the of_parse_phandle_with_args() function call.
 *
 * Returns the matching clock, ERR_PTR(-EPROBE_DEFER) when its provider
 * is not yet registered, or ERR_PTR(-EINVAL) for a NULL @clkspec.
 */
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	/* __func__ doubles as the con_id recorded in the per-user clk */
	return __of_clk_get_from_provider(clkspec, NULL, __func__);
}
3018
int of_clk_get_parent_count(struct device_node *np)
{
	/*
	 * Each "clocks" phandle plus its "#clock-cells" arguments names
	 * one parent; counting the entries gives the number of parents.
	 */
	int count = of_count_phandle_with_args(np, "clocks", "#clock-cells");

	return count;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
3024
/*
 * of_clk_get_parent_name() - resolve the name of @np's parent clock @index
 * @np: device node of the clock consumer
 * @index: zero-based position in @np's "clocks" property
 *
 * Returns the parent's "clock-output-names" entry when present, falling
 * back to the parent node's name; NULL if @index is negative or the
 * "clocks" phandle cannot be parsed.  The returned string is owned by
 * the device tree, not the caller.
 */
const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	struct property *prop;
	const char *clk_name;
	const __be32 *vp;
	u32 pv;
	int rc;
	int count;

	if (index < 0)
		return NULL;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	/* from here on, index is the provider-side output number (cell 0) */
	index = clkspec.args_count ? clkspec.args[0] : 0;
	count = 0;

	/* if there is an indices property, use it to transfer the index
	 * specified into an array offset for the clock-output-names property.
	 */
	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
		if (index == pv) {
			index = count;
			break;
		}
		count++;
	}

	/* fall back to the provider node's own name if no entry exists */
	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  index,
					  &clk_name) < 0)
		clk_name = clkspec.np->name;

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
3066
/**
 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
 * number of parents
 * @np: Device node pointer associated with clock provider
 * @parents: pointer to char array that hold the parents' names
 * @size: size of the @parents array
 *
 * Return: number of parents for the clock node.
 */
int of_clk_parent_fill(struct device_node *np, const char **parents,
		       unsigned int size)
{
	unsigned int count;

	/* stop at @size entries or at the first unresolvable parent name */
	for (count = 0; count < size; count++) {
		parents[count] = of_clk_get_parent_name(np, count);
		if (!parents[count])
			break;
	}

	return count;
}
EXPORT_SYMBOL_GPL(of_clk_parent_fill);
3087
/*
 * One pending DT clock provider queued by of_clk_init(): the init
 * callback from the match table plus the device node it applies to.
 */
struct clock_provider {
	of_clk_init_cb_t clk_init_cb;	/* provider's CLK_OF_DECLARE callback */
	struct device_node *np;
	struct list_head node;		/* entry in of_clk_init()'s local list */
};
3093
Gregory CLEMENT1771b102014-02-24 19:10:13 +01003094/*
3095 * This function looks for a parent clock. If there is one, then it
3096 * checks that the provider for this parent clock was initialized, in
3097 * this case the parent clock will be ready.
3098 */
3099static int parent_ready(struct device_node *np)
3100{
3101 int i = 0;
3102
3103 while (true) {
3104 struct clk *clk = of_clk_get(np, i);
3105
3106 /* this parent is ready we can check the next one */
3107 if (!IS_ERR(clk)) {
3108 clk_put(clk);
3109 i++;
3110 continue;
3111 }
3112
3113 /* at least one parent is not ready, we exit now */
3114 if (PTR_ERR(clk) == -EPROBE_DEFER)
3115 return 0;
3116
3117 /*
3118 * Here we make assumption that the device tree is
3119 * written correctly. So an error means that there is
3120 * no more parent. As we didn't exit yet, then the
3121 * previous parent are ready. If there is no clock
3122 * parent, no need to wait for them, then we can
3123 * consider their absence as being ready
3124 */
3125 return 1;
3126 }
3127}
3128
/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers;
 *           NULL means use the linker-built __clk_of_table.
 *
 * This function scans the device tree for matching clock providers
 * and calls their initialization functions. It also does it by trying
 * to follow the dependencies: a provider is only initialized once all
 * of its parent clocks are available (see parent_ready()), falling back
 * to unconditional initialization if no progress can be made.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct clock_provider *clk_provider, *next;
	bool is_init_done;
	bool force = false;
	LIST_HEAD(clk_provider_list);

	if (!matches)
		matches = &__clk_of_table;

	/* First prepare the list of the clocks providers */
	for_each_matching_node_and_match(np, matches, &match) {
		struct clock_provider *parent;

		parent = kzalloc(sizeof(*parent), GFP_KERNEL);
		if (!parent) {
			/* on OOM, free everything queued so far and bail */
			list_for_each_entry_safe(clk_provider, next,
						 &clk_provider_list, node) {
				list_del(&clk_provider->node);
				kfree(clk_provider);
			}
			return;
		}

		parent->clk_init_cb = match->data;
		parent->np = np;
		list_add_tail(&parent->node, &clk_provider_list);
	}

	/* repeatedly sweep the list, initializing whichever are ready */
	while (!list_empty(&clk_provider_list)) {
		is_init_done = false;
		list_for_each_entry_safe(clk_provider, next,
					&clk_provider_list, node) {
			if (force || parent_ready(clk_provider->np)) {

				clk_provider->clk_init_cb(clk_provider->np);
				of_clk_set_defaults(clk_provider->np, true);

				list_del(&clk_provider->node);
				kfree(clk_provider);
				is_init_done = true;
			}
		}

		/*
		 * We didn't manage to initialize any of the
		 * remaining providers during the last loop, so now we
		 * initialize all the remaining ones unconditionally
		 * in case the clock parent was not mandatory
		 */
		if (!is_init_done)
			force = true;
	}
}
3193#endif