/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API. See Documentation/clk.txt
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/clkdev.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/*** private data structures ***/

struct clk_core {
	const char		*name;
	const struct clk_ops	*ops;
	struct clk_hw		*hw;
	struct module		*owner;
	struct clk_core		*parent;
	const char		**parent_names;
	struct clk_core		**parents;
	u8			num_parents;
	u8			new_parent_index;
	unsigned long		rate;
	unsigned long		req_rate;
	unsigned long		new_rate;
	struct clk_core		*new_parent;
	struct clk_core		*new_child;
	unsigned long		flags;
	bool			orphan;
	unsigned int		enable_count;
	unsigned int		prepare_count;
	unsigned long		min_rate;
	unsigned long		max_rate;
	unsigned long		accuracy;
	int			phase;
	struct hlist_head	children;
	struct hlist_node	child_node;
	struct hlist_head	clks;
	unsigned int		notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry		*dentry;
	struct hlist_node	debug_node;
#endif
	struct kref		ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core	*core;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	struct hlist_node clks_node;
};

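/*
 * Note: struct clk_core is the single framework-internal representation of a
 * clock, while struct clk is a thin per-consumer handle onto it. Each handle
 * carries its own min_rate/max_rate request and is linked into core->clks,
 * which is how clk_core_get_boundaries() below aggregates the rate
 * constraints of every consumer of one clock.
 */
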
/*** locking ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}
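
/*
 * Note: both locks above are reentrant per task: a clk_ops callback or a
 * rate-change notifier may call back into the framework while its task
 * already holds the lock, and the nested acquisition only bumps the
 * refcount. An illustrative (hypothetical) provider callback relying on
 * this, where the nested clk_prepare() runs under the already-held
 * prepare_lock:
 *
 *	static int foo_clk_prepare(struct clk_hw *hw)
 *	{
 *		return clk_prepare(to_foo_clk(hw)->aux_clk);
 *	}
 *
 * foo_clk_prepare(), to_foo_clk() and aux_clk are made-up names.
 */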

static bool clk_core_is_prepared(struct clk_core *core)
{
	/*
	 * .is_prepared is optional for clocks that can prepare;
	 * fall back to the software usage counter if it is missing
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	return core->ops->is_prepared(core->hw);
}

static bool clk_core_is_enabled(struct clk_core *core)
{
	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to the software usage counter if .is_enabled is missing
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	return core->ops->is_enabled(core->hw);
}

static void clk_unprepare_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)
		return;

	if (core->flags & CLK_IGNORE_UNUSED)
		return;

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
	}
}

static void clk_disable_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;

	if (core->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence. call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
	}

unlock_out:
	clk_enable_unlock(flags);
}

static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);
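
/*
 * Note: clk_disable_unused() below gates clocks that no driver claimed
 * during boot. Booting with, e.g.:
 *
 *	clk_ignore_unused root=/dev/mmcblk0p2
 *
 * sets the flag above and skips that sweep, which can help when debugging a
 * driver that depends on a clock it never explicitly requested. (The root=
 * argument is only an example of a surrounding command line.)
 */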

static int clk_disable_unused(void)
{
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

/*** helper functions ***/

const char *__clk_get_name(struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

u8 __clk_get_num_parents(struct clk *clk)
{
	return !clk ? 0 : clk->core->num_parents;
}
EXPORT_SYMBOL_GPL(__clk_get_num_parents);

struct clk *__clk_get_parent(struct clk *clk)
{
	if (!clk)
		return NULL;

	/* TODO: Create a per-user clk and change callers to call clk_put */
	return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
}
EXPORT_SYMBOL_GPL(__clk_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))
		return core;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
						     u8 index)
{
	if (!core || index >= core->num_parents)
		return NULL;
	else if (!core->parents)
		return clk_core_lookup(core->parent_names[index]);
	else if (!core->parents[index])
		return core->parents[index] =
			clk_core_lookup(core->parent_names[index]);
	else
		return core->parents[index];
}

struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
{
	struct clk_core *parent;

	if (!clk)
		return NULL;

	parent = clk_core_get_parent_by_index(clk->core, index);

	return !parent ? NULL : parent->hw->clk;
}
EXPORT_SYMBOL_GPL(clk_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
	unsigned long ret;

	if (!core) {
		ret = 0;
		goto out;
	}

	ret = core->rate;

	if (core->flags & CLK_IS_ROOT)
		goto out;

	if (!core->parent)
		ret = 0;

out:
	return ret;
}

unsigned long __clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_rate_nolock(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_get_rate);

static unsigned long __clk_get_accuracy(struct clk_core *core)
{
	if (!core)
		return 0;

	return core->accuracy;
}

unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->core->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

bool __clk_is_prepared(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_prepared(clk->core);
}

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			       unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}

static int
clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req,
			     unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
						   &parent_req);
			if (ret)
				return ret;

			best = parent_req.rate;
		} else if (parent) {
			best = clk_core_get_rate_nolock(parent);
		} else {
			best = clk_core_get_rate_nolock(core);
		}

		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			parent_req = *req;
			ret = __clk_determine_rate(parent->hw, &parent_req);
			if (ret)
				continue;
		} else {
			parent_req.rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_req.rate,
				       best, flags)) {
			best_parent = parent;
			best = parent_req.rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

out:
	if (best_parent)
		req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}

struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	*min_rate = core->min_rate;
	*max_rate = core->max_rate;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);

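/*
 * Illustrative sketch: a provider typically calls the helper above from its
 * probe/init path to bound the rates the framework may pick. The hw pointer
 * and limits here are hypothetical:
 *
 *	clk_hw_set_rate_range(&foo->div_hw, 1000000, 200000000);
 *
 * Consumers can narrow the range further, per handle, with
 * clk_set_rate_range() on their struct clk.
 */
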
/*
 * Helper for finding the best parent to provide a given frequency. This can
 * be used directly as a determine_rate callback (e.g. for a mux), or from a
 * more complex clock that may combine a mux with other operations.
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);

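/*
 * Illustrative sketch: a mux provider can plug these helpers straight into
 * its clk_ops, as the generic clk-mux driver does. The ops table and the
 * .get_parent/.set_parent callbacks named here are hypothetical:
 *
 *	static const struct clk_ops foo_mux_ops = {
 *		.get_parent	= foo_mux_get_parent,
 *		.set_parent	= foo_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 */
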
/*** clk api ***/

static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN_ON(core->prepare_count == 0))
		return;

	if (--core->prepare_count > 0)
		return;

	WARN_ON(core->enable_count > 0);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep. One example is a clk which is accessed over
 * I2C. In the complex case a clk gate operation may require a fast and a slow
 * part. It is for this reason that clk_unprepare and clk_disable are not
 * mutually exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_prepare_lock();
	clk_core_unprepare(clk->core);
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_core_prepare(core->parent);
		if (ret)
			return ret;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret) {
			clk_core_unprepare(core->parent);
			return ret;
		}
	}

	core->prepare_count++;

	return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep. One example is a clk which is accessed over I2C. In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive. In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, a negative error code otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();
	ret = clk_core_prepare(clk->core);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void clk_core_disable(struct clk_core *core)
{
	lockdep_assert_held(&enable_lock);

	if (!core)
		return;

	if (WARN_ON(core->enable_count == 0))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete(core);

	clk_core_disable(core->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep. One example is a
 * SoC-internal clk which is controlled via simple register writes. In the
 * complex case a clk gate operation may require a fast and a slow part. It is
 * for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (IS_ERR_OR_NULL(clk))
		return;

	flags = clk_enable_lock();
	clk_core_disable(clk->core);
	clk_enable_unlock(flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&enable_lock);

	if (!core)
		return 0;

	if (WARN_ON(core->prepare_count == 0))
		return -ESHUTDOWN;

	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);

		if (ret)
			return ret;

		trace_clk_enable(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete(core);

		if (ret) {
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep. One example is a SoC-internal clk which
 * is controlled via simple register writes. In the complex case a clk ungate
 * operation may require a fast and a slow part. It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
 * must be called before clk_enable. Returns 0 on success, a negative error
 * code otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return 0;

	flags = clk_enable_lock();
	ret = clk_core_enable(clk->core);
	clk_enable_unlock(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

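/*
 * Illustrative sketch: a typical consumer pairs the sleepable and atomic
 * halves of the API and tears them down in reverse order. The clk handle
 * is assumed to come from an earlier clk_get():
 *
 *	ret = clk_prepare(clk);		(may sleep, e.g. an I2C access)
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(clk);		(must not sleep)
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 *	...
 *	clk_disable(clk);
 *	clk_unprepare(clk);
 *
 * Drivers that never enable clocks from atomic context can combine the two
 * steps with clk_prepare_enable()/clk_disable_unprepare().
 */
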
static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req)
{
	struct clk_core *parent;
	long rate;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	parent = core->parent;
	if (parent) {
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
	} else {
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;
	}

	if (core->ops->determine_rate) {
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else if (core->flags & CLK_SET_RATE_PARENT) {
		return clk_core_round_rate_nolock(parent, req);
	} else {
		req->rate = core->rate;
	}

	return 0;
}

/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: rate request; req->rate is the target, and req->min_rate and
 *	 req->max_rate bound the acceptable result
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	if (!hw) {
		req->rate = 0;
		return 0;
	}

	return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
	struct clk_rate_request req;
	int ret;

	if (!clk)
		return 0;

	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(clk->core, &req);
	if (ret)
		return 0;

	return req.rate;
}
EXPORT_SYMBOL_GPL(__clk_round_rate);

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned. If clk doesn't support the round_rate
 * operation then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long ret;

	if (!clk)
		return 0;

	clk_prepare_lock();
	ret = __clk_round_rate(clk, rate);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
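
/*
 * Illustrative sketch: clk_round_rate() lets a consumer ask what
 * clk_set_rate() would actually deliver without touching the hardware:
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *
 *	if (rounded > 0)
 *		ret = clk_set_rate(clk, rounded);
 *
 * The 48 MHz target is arbitrary; a real driver would also check the
 * rounded rate against its own tolerance.
 */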

/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'. Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback. Intended to be called by
 * internal clock code only. Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
		unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == core) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
		}
	}

	return ret;
}

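/*
 * Illustrative sketch: consumers subscribe to these rate-change events with
 * clk_notifier_register(). The callback, notifier block and limit below are
 * hypothetical:
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE && ndata->new_rate > FOO_MAX_HZ)
 *			return NOTIFY_BAD;
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_clk_notify,
 *	};
 *	...
 *	clk_notifier_register(clk, &foo_nb);
 */
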
/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes. Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
{
	unsigned long parent_accuracy = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	if (core->parent)
		parent_accuracy = core->parent->accuracy;

	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
							    parent_accuracy);
	else
		core->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_accuracies(child);
}

static long clk_core_get_accuracy(struct clk_core *core)
{
	unsigned long accuracy;

	clk_prepare_lock();
	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(core);

	accuracy = __clk_get_accuracy(core);
	clk_prepare_unlock();

	return accuracy;
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will be
 * issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_accuracy(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

static unsigned long clk_recalc(struct clk_core *core,
				unsigned long parent_rate)
{
	if (core->ops->recalc_rate)
		return core->ops->recalc_rate(core->hw, parent_rate);
	return parent_rate;
}

/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes. Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = core->rate;

	if (core->parent)
		parent_rate = core->parent->rate;

	core->rate = clk_recalc(core, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, msg);
}

static unsigned long clk_core_get_rate(struct clk_core *core)
{
	unsigned long rate;

	clk_prepare_lock();

	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, 0);

	rate = clk_core_get_rate_nolock(core);
	clk_prepare_unlock();

	return rate;
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_rate(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_rate);

static int clk_fetch_parent_index(struct clk_core *core,
				  struct clk_core *parent)
{
	int i;

	if (!core->parents) {
		core->parents = kcalloc(core->num_parents,
					sizeof(struct clk *), GFP_KERNEL);
		if (!core->parents)
			return -ENOMEM;
	}

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to clk_core_lookup.
	 */
	for (i = 0; i < core->num_parents; i++) {
		if (core->parents[i] == parent)
			return i;

		if (core->parents[i])
			continue;

		if (!strcmp(core->parent_names[i], parent->name)) {
			core->parents[i] = clk_core_lookup(parent->name);
			return i;
		}
	}

	return -EINVAL;
}

/*
 * Update the orphan status of @core and all its children.
 */
static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
{
	struct clk_core *child;

	core->orphan = is_orphan;

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_update_orphan_status(child, is_orphan);
}

static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
{
	bool was_orphan = core->orphan;

	hlist_del(&core->child_node);

	if (new_parent) {
		bool becomes_orphan = new_parent->orphan;

		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == core)
			new_parent->new_child = NULL;

		hlist_add_head(&core->child_node, &new_parent->children);

		if (was_orphan != becomes_orphan)
			clk_core_update_orphan_status(core, becomes_orphan);
	} else {
		hlist_add_head(&core->child_node, &clk_orphan_list);
		if (!was_orphan)
			clk_core_update_orphan_status(core, true);
	}

	core->parent = new_parent;
}

static struct clk_core *__clk_set_parent_before(struct clk_core *core,
						struct clk_core *parent)
{
	unsigned long flags;
	struct clk_core *old_parent = core->parent;

	/*
	 * Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on. This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */
	if (core->prepare_count) {
		clk_core_prepare(parent);
		flags = clk_enable_lock();
		clk_core_enable(parent);
		clk_core_enable(core);
		clk_enable_unlock(flags);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(core, parent);
	clk_enable_unlock(flags);

	return old_parent;
}

static void __clk_set_parent_after(struct clk_core *core,
				   struct clk_core *parent,
				   struct clk_core *old_parent)
{
	unsigned long flags;

	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (core->prepare_count) {
		flags = clk_enable_lock();
		clk_core_disable(core);
		clk_core_disable(old_parent);
		clk_enable_unlock(flags);
		clk_core_unprepare(old_parent);
	}
}

static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
			    u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk_core *old_parent;

	old_parent = __clk_set_parent_before(core, parent);

	trace_clk_set_parent(core, parent);

	/* change clock input source */
	if (parent && core->ops->set_parent)
		ret = core->ops->set_parent(core->hw, p_index);

	trace_clk_set_parent_complete(core, parent);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(core, old_parent);
		clk_enable_unlock(flags);

		if (core->prepare_count) {
			flags = clk_enable_lock();
			clk_core_disable(core);
			clk_core_disable(parent);
			clk_enable_unlock(flags);
			clk_core_unprepare(parent);
		}
		return ret;
	}

	__clk_set_parent_after(core, parent, old_parent);

	return 0;
}

Ulf Hanssona093bde2012-08-31 14:21:28 +02001229/**
Mike Turquetteb24764902012-03-15 23:11:19 -07001230 * __clk_speculate_rates
Stephen Boydd6968fc2015-04-30 13:54:13 -07001231 * @core: first clk in the subtree
Mike Turquetteb24764902012-03-15 23:11:19 -07001232 * @parent_rate: the "future" rate of clk's parent
1233 *
1234 * Walks the subtree of clks starting with clk, speculating rates as it
1235 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
1236 *
1237 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
1238 * pre-rate change notifications and skips clks in the
1239 * subtree that have not subscribed to them. Note that if a clk does not
1240 * implement the .recalc_rate callback then it is assumed that the clock will
Peter Meerwald24ee1a02013-06-29 15:14:19 +02001241 * take on the rate of its parent.
Mike Turquetteb24764902012-03-15 23:11:19 -07001242 */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001243static int __clk_speculate_rates(struct clk_core *core,
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001244 unsigned long parent_rate)
Mike Turquetteb24764902012-03-15 23:11:19 -07001245{
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001246 struct clk_core *child;
Mike Turquetteb24764902012-03-15 23:11:19 -07001247 unsigned long new_rate;
1248 int ret = NOTIFY_DONE;
1249
Krzysztof Kozlowski496eadf2015-01-09 09:28:10 +01001250 lockdep_assert_held(&prepare_lock);
1251
Stephen Boydd6968fc2015-04-30 13:54:13 -07001252 new_rate = clk_recalc(core, parent_rate);
Mike Turquetteb24764902012-03-15 23:11:19 -07001253
Soren Brinkmannfb72a052013-04-03 12:17:12 -07001254 /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001255 if (core->notifier_count)
1256 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
Mike Turquetteb24764902012-03-15 23:11:19 -07001257
Mike Turquette86bcfa22014-02-24 16:08:41 -08001258 if (ret & NOTIFY_STOP_MASK) {
1259 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
Stephen Boydd6968fc2015-04-30 13:54:13 -07001260 __func__, core->name, ret);
Mike Turquetteb24764902012-03-15 23:11:19 -07001261 goto out;
Mike Turquette86bcfa22014-02-24 16:08:41 -08001262 }
Mike Turquetteb24764902012-03-15 23:11:19 -07001263
Stephen Boydd6968fc2015-04-30 13:54:13 -07001264 hlist_for_each_entry(child, &core->children, child_node) {
Mike Turquetteb24764902012-03-15 23:11:19 -07001265 ret = __clk_speculate_rates(child, new_rate);
Soren Brinkmannfb72a052013-04-03 12:17:12 -07001266 if (ret & NOTIFY_STOP_MASK)
Mike Turquetteb24764902012-03-15 23:11:19 -07001267 break;
1268 }
1269
1270out:
1271 return ret;
1272}
1273
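/*
 * Illustrative sketch (not part of the framework): the consumer-side
 * notifier that __clk_speculate_rates() ultimately invokes. Returning
 * NOTIFY_BAD from PRE_RATE_CHANGE vetoes the speculated rate. The
 * 50 MHz limit and all "example_*" names are hypothetical.
 */
static int example_rate_notifier_cb(struct notifier_block *nb,
				    unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;

	/* refuse rates this hypothetical device cannot sustain */
	if (event == PRE_RATE_CHANGE && ndata->new_rate > 50000000)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static struct notifier_block example_rate_nb __maybe_unused = {
	.notifier_call = example_rate_notifier_cb,
};

/* a consumer would attach it with clk_notifier_register(clk, &example_rate_nb) */
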
Stephen Boydd6968fc2015-04-30 13:54:13 -07001274static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001275 struct clk_core *new_parent, u8 p_index)
Mike Turquetteb24764902012-03-15 23:11:19 -07001276{
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001277 struct clk_core *child;
Mike Turquetteb24764902012-03-15 23:11:19 -07001278
Stephen Boydd6968fc2015-04-30 13:54:13 -07001279 core->new_rate = new_rate;
1280 core->new_parent = new_parent;
1281 core->new_parent_index = p_index;
James Hogan71472c02013-07-29 12:25:00 +01001282 /* include clk in new parent's PRE_RATE_CHANGE notifications */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001283 core->new_child = NULL;
1284 if (new_parent && new_parent != core->parent)
1285 new_parent->new_child = core;
Mike Turquetteb24764902012-03-15 23:11:19 -07001286
Stephen Boydd6968fc2015-04-30 13:54:13 -07001287 hlist_for_each_entry(child, &core->children, child_node) {
Stephen Boyd8f2c2db2014-03-26 16:06:36 -07001288 child->new_rate = clk_recalc(child, new_rate);
James Hogan71472c02013-07-29 12:25:00 +01001289 clk_calc_subtree(child, child->new_rate, NULL, 0);
Mike Turquetteb24764902012-03-15 23:11:19 -07001290 }
1291}
1292
1293/*
1294 * calculate the new rates returning the topmost clock that has to be
1295 * changed.
1296 */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001297static struct clk_core *clk_calc_new_rates(struct clk_core *core,
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001298 unsigned long rate)
Mike Turquetteb24764902012-03-15 23:11:19 -07001299{
Stephen Boydd6968fc2015-04-30 13:54:13 -07001300 struct clk_core *top = core;
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001301 struct clk_core *old_parent, *parent;
Shawn Guo81536e02012-04-12 20:50:17 +08001302 unsigned long best_parent_rate = 0;
Mike Turquetteb24764902012-03-15 23:11:19 -07001303 unsigned long new_rate;
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +01001304 unsigned long min_rate;
1305 unsigned long max_rate;
Tomasz Figaf1c8b2e2013-09-29 02:37:14 +02001306 int p_index = 0;
Boris Brezillon03bc10a2015-03-29 03:48:48 +02001307 long ret;
Mike Turquetteb24764902012-03-15 23:11:19 -07001308
Mike Turquette7452b212012-03-26 14:45:36 -07001309 /* sanity */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001310 if (IS_ERR_OR_NULL(core))
Mike Turquette7452b212012-03-26 14:45:36 -07001311 return NULL;
1312
Mike Turquette63f5c3b2012-05-02 16:23:43 -07001313 /* save parent rate, if it exists */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001314 parent = old_parent = core->parent;
James Hogan71472c02013-07-29 12:25:00 +01001315 if (parent)
1316 best_parent_rate = parent->rate;
Mike Turquette63f5c3b2012-05-02 16:23:43 -07001317
Stephen Boydd6968fc2015-04-30 13:54:13 -07001318 clk_core_get_boundaries(core, &min_rate, &max_rate);
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +01001319
James Hogan71472c02013-07-29 12:25:00 +01001320 /* find the closest rate and parent clk/rate */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001321 if (core->ops->determine_rate) {
Boris Brezillon0817b622015-07-07 20:48:08 +02001322 struct clk_rate_request req;
1323
1324 req.rate = rate;
1325 req.min_rate = min_rate;
1326 req.max_rate = max_rate;
1327 if (parent) {
1328 req.best_parent_hw = parent->hw;
1329 req.best_parent_rate = parent->rate;
1330 } else {
1331 req.best_parent_hw = NULL;
1332 req.best_parent_rate = 0;
1333 }
1334
1335 ret = core->ops->determine_rate(core->hw, &req);
Boris Brezillon03bc10a2015-03-29 03:48:48 +02001336 if (ret < 0)
1337 return NULL;
1338
Boris Brezillon0817b622015-07-07 20:48:08 +02001339 best_parent_rate = req.best_parent_rate;
1340 new_rate = req.rate;
1341 parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
Stephen Boydd6968fc2015-04-30 13:54:13 -07001342 } else if (core->ops->round_rate) {
1343 ret = core->ops->round_rate(core->hw, rate,
Boris Brezillon0817b622015-07-07 20:48:08 +02001344 &best_parent_rate);
Boris Brezillon03bc10a2015-03-29 03:48:48 +02001345 if (ret < 0)
1346 return NULL;
1347
1348 new_rate = ret;
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +01001349 if (new_rate < min_rate || new_rate > max_rate)
1350 return NULL;
Stephen Boydd6968fc2015-04-30 13:54:13 -07001351 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
James Hogan71472c02013-07-29 12:25:00 +01001352 /* pass-through clock without adjustable parent */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001353 core->new_rate = core->rate;
James Hogan71472c02013-07-29 12:25:00 +01001354 return NULL;
1355 } else {
1356 /* pass-through clock with adjustable parent */
1357 top = clk_calc_new_rates(parent, rate);
1358 new_rate = parent->new_rate;
Mike Turquette63f5c3b2012-05-02 16:23:43 -07001359 goto out;
Mike Turquette7452b212012-03-26 14:45:36 -07001360 }
1361
James Hogan71472c02013-07-29 12:25:00 +01001362 /* some clocks must be gated to change parent */
1363 if (parent != old_parent &&
Stephen Boydd6968fc2015-04-30 13:54:13 -07001364 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
James Hogan71472c02013-07-29 12:25:00 +01001365 pr_debug("%s: %s not gated but wants to reparent\n",
Stephen Boydd6968fc2015-04-30 13:54:13 -07001366 __func__, core->name);
Mike Turquetteb24764902012-03-15 23:11:19 -07001367 return NULL;
1368 }
1369
James Hogan71472c02013-07-29 12:25:00 +01001370 /* try finding the new parent index */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001371 if (parent && core->num_parents > 1) {
1372 p_index = clk_fetch_parent_index(core, parent);
Tomasz Figaf1c8b2e2013-09-29 02:37:14 +02001373 if (p_index < 0) {
James Hogan71472c02013-07-29 12:25:00 +01001374 pr_debug("%s: clk %s can not be parent of clk %s\n",
Stephen Boydd6968fc2015-04-30 13:54:13 -07001375 __func__, parent->name, core->name);
James Hogan71472c02013-07-29 12:25:00 +01001376 return NULL;
1377 }
Mike Turquetteb24764902012-03-15 23:11:19 -07001378 }
1379
Stephen Boydd6968fc2015-04-30 13:54:13 -07001380 if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
James Hogan71472c02013-07-29 12:25:00 +01001381 best_parent_rate != parent->rate)
1382 top = clk_calc_new_rates(parent, best_parent_rate);
Mike Turquetteb24764902012-03-15 23:11:19 -07001383
1384out:
Stephen Boydd6968fc2015-04-30 13:54:13 -07001385 clk_calc_subtree(core, new_rate, parent, p_index);
Mike Turquetteb24764902012-03-15 23:11:19 -07001386
1387 return top;
1388}
1389
1390/*
1391 * Notify about rate changes in a subtree. Always walk down the whole tree
1392 * so that in case of an error we can walk down the whole tree again and
1393 * abort the change.
1394 */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001395static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001396 unsigned long event)
Mike Turquetteb24764902012-03-15 23:11:19 -07001397{
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001398 struct clk_core *child, *tmp_clk, *fail_clk = NULL;
Mike Turquetteb24764902012-03-15 23:11:19 -07001399 int ret = NOTIFY_DONE;
1400
Stephen Boydd6968fc2015-04-30 13:54:13 -07001401 if (core->rate == core->new_rate)
Sachin Kamat5fda6852013-03-13 15:17:49 +05301402 return NULL;
Mike Turquetteb24764902012-03-15 23:11:19 -07001403
Stephen Boydd6968fc2015-04-30 13:54:13 -07001404 if (core->notifier_count) {
1405 ret = __clk_notify(core, event, core->rate, core->new_rate);
Soren Brinkmannfb72a052013-04-03 12:17:12 -07001406 if (ret & NOTIFY_STOP_MASK)
Stephen Boydd6968fc2015-04-30 13:54:13 -07001407 fail_clk = core;
Mike Turquetteb24764902012-03-15 23:11:19 -07001408 }
1409
Stephen Boydd6968fc2015-04-30 13:54:13 -07001410 hlist_for_each_entry(child, &core->children, child_node) {
James Hogan71472c02013-07-29 12:25:00 +01001411 /* Skip children that will be reparented to another clock */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001412 if (child->new_parent && child->new_parent != core)
James Hogan71472c02013-07-29 12:25:00 +01001413 continue;
1414 tmp_clk = clk_propagate_rate_change(child, event);
1415 if (tmp_clk)
1416 fail_clk = tmp_clk;
1417 }
1418
Stephen Boydd6968fc2015-04-30 13:54:13 -07001419 /* handle the new child that might not be in core->children yet */
1420 if (core->new_child) {
1421 tmp_clk = clk_propagate_rate_change(core->new_child, event);
James Hogan71472c02013-07-29 12:25:00 +01001422 if (tmp_clk)
1423 fail_clk = tmp_clk;
Mike Turquetteb24764902012-03-15 23:11:19 -07001424 }
1425
1426 return fail_clk;
1427}
1428
1429/*
1430 * walk down a subtree and set the new rates, notifying the rate
1431 * change along the way
1432 */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001433static void clk_change_rate(struct clk_core *core)
Mike Turquetteb24764902012-03-15 23:11:19 -07001434{
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001435 struct clk_core *child;
Tero Kristo067bb172014-08-21 16:47:45 +03001436 struct hlist_node *tmp;
Mike Turquetteb24764902012-03-15 23:11:19 -07001437 unsigned long old_rate;
Pawel Mollbf47b4f2012-06-08 14:04:06 +01001438 unsigned long best_parent_rate = 0;
Stephen Boyd3fa22522014-01-15 10:47:22 -08001439 bool skip_set_rate = false;
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001440 struct clk_core *old_parent;
Mike Turquetteb24764902012-03-15 23:11:19 -07001441
Stephen Boydd6968fc2015-04-30 13:54:13 -07001442 old_rate = core->rate;
Mike Turquetteb24764902012-03-15 23:11:19 -07001443
Stephen Boydd6968fc2015-04-30 13:54:13 -07001444 if (core->new_parent)
1445 best_parent_rate = core->new_parent->rate;
1446 else if (core->parent)
1447 best_parent_rate = core->parent->rate;
Pawel Mollbf47b4f2012-06-08 14:04:06 +01001448
Stephen Boydd6968fc2015-04-30 13:54:13 -07001449 if (core->new_parent && core->new_parent != core->parent) {
1450 old_parent = __clk_set_parent_before(core, core->new_parent);
1451 trace_clk_set_parent(core, core->new_parent);
Stephen Boyd3fa22522014-01-15 10:47:22 -08001452
Stephen Boydd6968fc2015-04-30 13:54:13 -07001453 if (core->ops->set_rate_and_parent) {
Stephen Boyd3fa22522014-01-15 10:47:22 -08001454 skip_set_rate = true;
Stephen Boydd6968fc2015-04-30 13:54:13 -07001455 core->ops->set_rate_and_parent(core->hw, core->new_rate,
Stephen Boyd3fa22522014-01-15 10:47:22 -08001456 best_parent_rate,
Stephen Boydd6968fc2015-04-30 13:54:13 -07001457 core->new_parent_index);
1458 } else if (core->ops->set_parent) {
1459 core->ops->set_parent(core->hw, core->new_parent_index);
Stephen Boyd3fa22522014-01-15 10:47:22 -08001460 }
1461
Stephen Boydd6968fc2015-04-30 13:54:13 -07001462 trace_clk_set_parent_complete(core, core->new_parent);
1463 __clk_set_parent_after(core, core->new_parent, old_parent);
Stephen Boyd3fa22522014-01-15 10:47:22 -08001464 }
1465
Stephen Boydd6968fc2015-04-30 13:54:13 -07001466 trace_clk_set_rate(core, core->new_rate);
Stephen Boyddfc202e2015-02-02 14:37:41 -08001467
Stephen Boydd6968fc2015-04-30 13:54:13 -07001468 if (!skip_set_rate && core->ops->set_rate)
1469 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
Mike Turquetteb24764902012-03-15 23:11:19 -07001470
Stephen Boydd6968fc2015-04-30 13:54:13 -07001471 trace_clk_set_rate_complete(core, core->new_rate);
Stephen Boyddfc202e2015-02-02 14:37:41 -08001472
Stephen Boydd6968fc2015-04-30 13:54:13 -07001473 core->rate = clk_recalc(core, best_parent_rate);
Mike Turquetteb24764902012-03-15 23:11:19 -07001474
Stephen Boydd6968fc2015-04-30 13:54:13 -07001475 if (core->notifier_count && old_rate != core->rate)
1476 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
Mike Turquetteb24764902012-03-15 23:11:19 -07001477
Michael Turquette85e88fa2015-06-20 12:18:03 -07001478 if (core->flags & CLK_RECALC_NEW_RATES)
1479 (void)clk_calc_new_rates(core, core->new_rate);
Bartlomiej Zolnierkiewiczd8d91982015-04-03 18:43:44 +02001480
Tero Kristo067bb172014-08-21 16:47:45 +03001481 /*
1482 * Use safe iteration, as change_rate can actually swap parents
1483 * for certain clock types.
1484 */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001485 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
James Hogan71472c02013-07-29 12:25:00 +01001486 /* Skip children that will be reparented to another clock */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001487 if (child->new_parent && child->new_parent != core)
James Hogan71472c02013-07-29 12:25:00 +01001488 continue;
Mike Turquetteb24764902012-03-15 23:11:19 -07001489 clk_change_rate(child);
James Hogan71472c02013-07-29 12:25:00 +01001490 }
1491
Stephen Boydd6968fc2015-04-30 13:54:13 -07001492 /* handle the new child that might not be in core->children yet */
1493 if (core->new_child)
1494 clk_change_rate(core->new_child);
Mike Turquetteb24764902012-03-15 23:11:19 -07001495}
1496
Stephen Boydd6968fc2015-04-30 13:54:13 -07001497static int clk_core_set_rate_nolock(struct clk_core *core,
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +01001498 unsigned long req_rate)
1499{
1500 struct clk_core *top, *fail_clk;
1501 unsigned long rate = req_rate;
1502 int ret = 0;
1503
Stephen Boydd6968fc2015-04-30 13:54:13 -07001504 if (!core)
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +01001505 return 0;
1506
1507 /* bail early if nothing to do */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001508 if (rate == clk_core_get_rate_nolock(core))
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +01001509 return 0;
1510
Stephen Boydd6968fc2015-04-30 13:54:13 -07001511 if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +01001512 return -EBUSY;
1513
1514 /* calculate new rates and get the topmost changed clock */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001515 top = clk_calc_new_rates(core, rate);
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +01001516 if (!top)
1517 return -EINVAL;
1518
1519 /* notify that we are about to change rates */
1520 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
1521 if (fail_clk) {
1522 pr_debug("%s: failed to set %s rate\n", __func__,
1523 fail_clk->name);
1524 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
1525 return -EBUSY;
1526 }
1527
1528 /* change the rates */
1529 clk_change_rate(top);
1530
Stephen Boydd6968fc2015-04-30 13:54:13 -07001531 core->req_rate = req_rate;
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +01001532
1533 return ret;
1534}
1535
Mike Turquetteb24764902012-03-15 23:11:19 -07001536/**
1537 * clk_set_rate - specify a new rate for clk
1538 * @clk: the clk whose rate is being changed
1539 * @rate: the new rate for clk
1540 *
Mike Turquette5654dc92012-03-26 11:51:34 -07001541 * In the simplest case clk_set_rate will only adjust the rate of clk.
Mike Turquetteb24764902012-03-15 23:11:19 -07001542 *
Mike Turquette5654dc92012-03-26 11:51:34 -07001543 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
1544 * propagate up to clk's parent; whether or not this happens depends on the
1545 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
1546 * after calling .round_rate then upstream parent propagation is ignored. If
1547 * *parent_rate comes back with a new rate for clk's parent then we propagate
Peter Meerwald24ee1a02013-06-29 15:14:19 +02001548 * up to clk's parent and set its rate. Upward propagation will continue
Mike Turquette5654dc92012-03-26 11:51:34 -07001549 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
1550 * .round_rate stops requesting changes to clk's parent_rate.
Mike Turquetteb24764902012-03-15 23:11:19 -07001551 *
Mike Turquette5654dc92012-03-26 11:51:34 -07001552 * Rate changes are accomplished via tree traversal that also recalculates the
1553 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
Mike Turquetteb24764902012-03-15 23:11:19 -07001554 *
1555 * Returns 0 on success, -EERROR otherwise.
1556 */
1557int clk_set_rate(struct clk *clk, unsigned long rate)
1558{
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +01001559 int ret;
Mike Turquetteb24764902012-03-15 23:11:19 -07001560
Mike Turquette89ac8d72013-08-21 23:58:09 -07001561 if (!clk)
1562 return 0;
1563
Mike Turquetteb24764902012-03-15 23:11:19 -07001564 /* prevent racing with updates to the clock topology */
Mike Turquetteeab89f62013-03-28 13:59:01 -07001565 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001566
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +01001567 ret = clk_core_set_rate_nolock(clk->core, rate);
Mike Turquetteb24764902012-03-15 23:11:19 -07001568
Mike Turquetteeab89f62013-03-28 13:59:01 -07001569 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001570
1571 return ret;
1572}
1573EXPORT_SYMBOL_GPL(clk_set_rate);
1574
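/*
 * Illustrative sketch: typical consumer usage of clk_set_rate(). The
 * device handle, the "core" con_id and the 100 MHz target are
 * hypothetical; real drivers take these from their bindings.
 */
static int __maybe_unused example_set_core_rate(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "core");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* request 100 MHz; .round_rate/.determine_rate may adjust it */
	ret = clk_set_rate(clk, 100000000);
	if (ret)
		clk_disable_unprepare(clk);

	return ret;
}
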
1575/**
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +01001576 * clk_set_rate_range - set a rate range for a clock source
1577 * @clk: clock source
1578 * @min: desired minimum clock rate in Hz, inclusive
1579 * @max: desired maximum clock rate in Hz, inclusive
1580 *
1581 * Returns success (0) or negative errno.
1582 */
1583int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
1584{
1585 int ret = 0;
1586
1587 if (!clk)
1588 return 0;
1589
1590 if (min > max) {
1591 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
1592 __func__, clk->core->name, clk->dev_id, clk->con_id,
1593 min, max);
1594 return -EINVAL;
1595 }
1596
1597 clk_prepare_lock();
1598
1599 if (min != clk->min_rate || max != clk->max_rate) {
1600 clk->min_rate = min;
1601 clk->max_rate = max;
1602 ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
1603 }
1604
1605 clk_prepare_unlock();
1606
1607 return ret;
1608}
1609EXPORT_SYMBOL_GPL(clk_set_rate_range);
1610
1611/**
1612 * clk_set_min_rate - set a minimum clock rate for a clock source
1613 * @clk: clock source
1614 * @rate: desired minimum clock rate in Hz, inclusive
1615 *
1616 * Returns success (0) or negative errno.
1617 */
1618int clk_set_min_rate(struct clk *clk, unsigned long rate)
1619{
1620 if (!clk)
1621 return 0;
1622
1623 return clk_set_rate_range(clk, rate, clk->max_rate);
1624}
1625EXPORT_SYMBOL_GPL(clk_set_min_rate);
1626
1627/**
1628 * clk_set_max_rate - set a maximum clock rate for a clock source
1629 * @clk: clock source
1630 * @rate: desired maximum clock rate in Hz, inclusive
1631 *
1632 * Returns success (0) or negative errno.
1633 */
1634int clk_set_max_rate(struct clk *clk, unsigned long rate)
1635{
1636 if (!clk)
1637 return 0;
1638
1639 return clk_set_rate_range(clk, clk->min_rate, rate);
1640}
1641EXPORT_SYMBOL_GPL(clk_set_max_rate);
1642
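/*
 * Illustrative sketch: constraining a hypothetical bus clock to a
 * 50-200 MHz window. Once the range is recorded, the framework
 * re-evaluates the last requested rate against the new boundaries.
 */
static int __maybe_unused example_constrain_bus(struct clk *bus)
{
	int ret;

	ret = clk_set_rate_range(bus, 50000000, 200000000);
	if (ret)
		return ret;

	/* the piecewise calls below are equivalent to the range call */
	ret = clk_set_min_rate(bus, 50000000);
	if (!ret)
		ret = clk_set_max_rate(bus, 200000000);

	return ret;
}
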
1643/**
Mike Turquetteb24764902012-03-15 23:11:19 -07001644 * clk_get_parent - return the parent of a clk
1645 * @clk: the clk whose parent gets returned
1646 *
1647 * Simply returns clk->parent. Returns NULL if clk is NULL.
1648 */
1649struct clk *clk_get_parent(struct clk *clk)
1650{
1651 struct clk *parent;
1652
Mike Turquetteeab89f62013-03-28 13:59:01 -07001653 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001654 parent = __clk_get_parent(clk);
Mike Turquetteeab89f62013-03-28 13:59:01 -07001655 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001656
1657 return parent;
1658}
1659EXPORT_SYMBOL_GPL(clk_get_parent);
1660
1661/*
1662 * .get_parent is mandatory for clocks with multiple possible parents. It is
1663 * optional for single-parent clocks. Always call .get_parent if it is
1664 * available and WARN if it is missing for multi-parent clocks.
1665 *
1666 * For single-parent clocks without .get_parent, first check to see if the
1667 * .parents array exists, and if so use it to avoid an expensive tree
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001668 * traversal. If .parents does not exist then walk the tree.
Mike Turquetteb24764902012-03-15 23:11:19 -07001669 */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001670static struct clk_core *__clk_init_parent(struct clk_core *core)
Mike Turquetteb24764902012-03-15 23:11:19 -07001671{
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001672 struct clk_core *ret = NULL;
Mike Turquetteb24764902012-03-15 23:11:19 -07001673 u8 index;
1674
1675 /* handle the trivial cases */
1676
Stephen Boydd6968fc2015-04-30 13:54:13 -07001677 if (!core->num_parents)
Mike Turquetteb24764902012-03-15 23:11:19 -07001678 goto out;
1679
Stephen Boydd6968fc2015-04-30 13:54:13 -07001680 if (core->num_parents == 1) {
1681 if (IS_ERR_OR_NULL(core->parent))
1682 core->parent = clk_core_lookup(core->parent_names[0]);
1683 ret = core->parent;
Mike Turquetteb24764902012-03-15 23:11:19 -07001684 goto out;
1685 }
1686
Stephen Boydd6968fc2015-04-30 13:54:13 -07001687 if (!core->ops->get_parent) {
1688 WARN(!core->ops->get_parent,
Mike Turquetteb24764902012-03-15 23:11:19 -07001689 "%s: multi-parent clocks must implement .get_parent\n",
1690 __func__);
1691 goto out;
1692 }
1693
1694 /*
Stephen Boydd6968fc2015-04-30 13:54:13 -07001695 * Do our best to cache parent clocks in core->parents. This prevents
1696 * unnecessary and expensive lookups. We don't set core->parent here;
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001697 * that is done by the calling function.
Mike Turquetteb24764902012-03-15 23:11:19 -07001698 */
1699
Stephen Boydd6968fc2015-04-30 13:54:13 -07001700 index = core->ops->get_parent(core->hw);
Mike Turquetteb24764902012-03-15 23:11:19 -07001701
Stephen Boydd6968fc2015-04-30 13:54:13 -07001702 if (!core->parents)
1703 core->parents =
1704 kcalloc(core->num_parents, sizeof(struct clk *),
Mike Turquetteb24764902012-03-15 23:11:19 -07001705 GFP_KERNEL);
1706
Stephen Boydd6968fc2015-04-30 13:54:13 -07001707 ret = clk_core_get_parent_by_index(core, index);
Mike Turquetteb24764902012-03-15 23:11:19 -07001708
1709out:
1710 return ret;
1711}
1712
Stephen Boydd6968fc2015-04-30 13:54:13 -07001713static void clk_core_reparent(struct clk_core *core,
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001714 struct clk_core *new_parent)
Ulf Hanssonb33d2122013-04-02 23:09:37 +02001715{
Stephen Boydd6968fc2015-04-30 13:54:13 -07001716 clk_reparent(core, new_parent);
1717 __clk_recalc_accuracies(core);
1718 __clk_recalc_rates(core, POST_RATE_CHANGE);
Mike Turquetteb24764902012-03-15 23:11:19 -07001719}
1720
Tomeu Vizoso42c86542015-03-11 11:34:25 +01001721void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
1722{
1723 if (!hw)
1724 return;
1725
1726 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
1727}
1728
Mike Turquetteb24764902012-03-15 23:11:19 -07001729/**
Thierry Reding4e88f3d2015-01-21 17:13:00 +01001730 * clk_has_parent - check if a clock is a possible parent for another
1731 * @clk: clock source
1732 * @parent: parent clock source
Mike Turquetteb24764902012-03-15 23:11:19 -07001733 *
Thierry Reding4e88f3d2015-01-21 17:13:00 +01001734 * This function can be used in drivers that need to check that a clock can be
1735 * the parent of another without actually changing the parent.
Saravana Kannanf8aa0bd2013-05-15 21:07:24 -07001736 *
Thierry Reding4e88f3d2015-01-21 17:13:00 +01001737 * Returns true if @parent is a possible parent for @clk, false otherwise.
Mike Turquetteb24764902012-03-15 23:11:19 -07001738 */
Thierry Reding4e88f3d2015-01-21 17:13:00 +01001739bool clk_has_parent(struct clk *clk, struct clk *parent)
1740{
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001741 struct clk_core *core, *parent_core;
Thierry Reding4e88f3d2015-01-21 17:13:00 +01001742 unsigned int i;
1743
1744 /* NULL clocks should be nops, so return success if either is NULL. */
1745 if (!clk || !parent)
1746 return true;
1747
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001748 core = clk->core;
1749 parent_core = parent->core;
1750
Thierry Reding4e88f3d2015-01-21 17:13:00 +01001751 /* Optimize for the case where the parent is already the parent. */
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001752 if (core->parent == parent_core)
Thierry Reding4e88f3d2015-01-21 17:13:00 +01001753 return true;
1754
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001755 for (i = 0; i < core->num_parents; i++)
1756 if (strcmp(core->parent_names[i], parent_core->name) == 0)
Thierry Reding4e88f3d2015-01-21 17:13:00 +01001757 return true;
1758
1759 return false;
1760}
1761EXPORT_SYMBOL_GPL(clk_has_parent);
1762
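/*
 * Illustrative sketch: probing a mux before reparenting it. Both
 * handles are assumed to come from clk_get(); names are hypothetical.
 */
static int __maybe_unused example_try_reparent(struct clk *mux,
					       struct clk *parent)
{
	/* bail out early if the candidate is not wired to this mux */
	if (!clk_has_parent(mux, parent))
		return -EINVAL;

	return clk_set_parent(mux, parent);
}
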
Stephen Boydd6968fc2015-04-30 13:54:13 -07001763static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
Mike Turquetteb24764902012-03-15 23:11:19 -07001764{
1765 int ret = 0;
Tomasz Figaf1c8b2e2013-09-29 02:37:14 +02001766 int p_index = 0;
Ulf Hansson031dcc92013-04-02 23:09:38 +02001767 unsigned long p_rate = 0;
Mike Turquetteb24764902012-03-15 23:11:19 -07001768
Stephen Boydd6968fc2015-04-30 13:54:13 -07001769 if (!core)
Mike Turquette89ac8d72013-08-21 23:58:09 -07001770 return 0;
1771
Mike Turquetteb24764902012-03-15 23:11:19 -07001772 /* prevent racing with updates to the clock topology */
Mike Turquetteeab89f62013-03-28 13:59:01 -07001773 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001774
Stephen Boydd6968fc2015-04-30 13:54:13 -07001775 if (core->parent == parent)
Mike Turquetteb24764902012-03-15 23:11:19 -07001776 goto out;
1777
Stephen Boydb61c43c2015-02-02 14:11:25 -08001778 /* verify ops for multi-parent clks */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001779 if ((core->num_parents > 1) && (!core->ops->set_parent)) {
Stephen Boydb61c43c2015-02-02 14:11:25 -08001780 ret = -ENOSYS;
1781 goto out;
1782 }
1783
Ulf Hansson031dcc92013-04-02 23:09:38 +02001784 /* check that we are allowed to re-parent if the clock is in use */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001785 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
Ulf Hansson031dcc92013-04-02 23:09:38 +02001786 ret = -EBUSY;
1787 goto out;
1788 }
1789
1790 /* try finding the new parent index */
1791 if (parent) {
Stephen Boydd6968fc2015-04-30 13:54:13 -07001792 p_index = clk_fetch_parent_index(core, parent);
Ulf Hansson031dcc92013-04-02 23:09:38 +02001793 p_rate = parent->rate;
Tomasz Figaf1c8b2e2013-09-29 02:37:14 +02001794 if (p_index < 0) {
Ulf Hansson031dcc92013-04-02 23:09:38 +02001795 pr_debug("%s: clk %s can not be parent of clk %s\n",
Stephen Boydd6968fc2015-04-30 13:54:13 -07001796 __func__, parent->name, core->name);
Tomasz Figaf1c8b2e2013-09-29 02:37:14 +02001797 ret = p_index;
Ulf Hansson031dcc92013-04-02 23:09:38 +02001798 goto out;
1799 }
1800 }
1801
Mike Turquetteb24764902012-03-15 23:11:19 -07001802 /* propagate PRE_RATE_CHANGE notifications */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001803 ret = __clk_speculate_rates(core, p_rate);
Mike Turquetteb24764902012-03-15 23:11:19 -07001804
1805 /* abort if a driver objects */
Soren Brinkmannfb72a052013-04-03 12:17:12 -07001806 if (ret & NOTIFY_STOP_MASK)
Mike Turquetteb24764902012-03-15 23:11:19 -07001807 goto out;
1808
Ulf Hansson031dcc92013-04-02 23:09:38 +02001809 /* do the re-parent */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001810 ret = __clk_set_parent(core, parent, p_index);
Mike Turquetteb24764902012-03-15 23:11:19 -07001811
Boris BREZILLON5279fc42013-12-21 10:34:47 +01001812 /* propagate rate and accuracy recalculation accordingly */
1813 if (ret) {
Stephen Boydd6968fc2015-04-30 13:54:13 -07001814 __clk_recalc_rates(core, ABORT_RATE_CHANGE);
Boris BREZILLON5279fc42013-12-21 10:34:47 +01001815 } else {
Stephen Boydd6968fc2015-04-30 13:54:13 -07001816 __clk_recalc_rates(core, POST_RATE_CHANGE);
1817 __clk_recalc_accuracies(core);
Boris BREZILLON5279fc42013-12-21 10:34:47 +01001818 }
Mike Turquetteb24764902012-03-15 23:11:19 -07001819
1820out:
Mike Turquetteeab89f62013-03-28 13:59:01 -07001821 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001822
1823 return ret;
1824}
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001825
1826/**
1827 * clk_set_parent - switch the parent of a mux clk
1828 * @clk: the mux clk whose input we are switching
1829 * @parent: the new input to clk
1830 *
1831 * Re-parent clk to use parent as its new input source. If clk is in
1832 * prepared state, the clk will get enabled for the duration of this call. If
1833 * that's not acceptable for a specific clk (e.g. the consumer can't handle
1834 * that, the reparenting is glitchy in hardware, etc), use the
1835 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
1836 *
1837 * After successfully changing clk's parent clk_set_parent will update the
1838 * clk topology, sysfs topology and propagate rate recalculation via
1839 * __clk_recalc_rates.
1840 *
1841 * Returns 0 on success, -EERROR otherwise.
1842 */
1843int clk_set_parent(struct clk *clk, struct clk *parent)
1844{
1845 if (!clk)
1846 return 0;
1847
1848 return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
1849}
Mike Turquetteb24764902012-03-15 23:11:19 -07001850EXPORT_SYMBOL_GPL(clk_set_parent);
1851
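/*
 * Illustrative sketch: reparenting a mux that carries
 * CLK_SET_PARENT_GATE. Such a clk must be unprepared across the switch
 * or clk_set_parent() fails with -EBUSY. Names are hypothetical and
 * the caller is assumed to hold the only prepare/enable references.
 */
static int __maybe_unused example_reparent_gated(struct clk *mux,
						 struct clk *new_parent)
{
	int ret;

	clk_disable_unprepare(mux);
	ret = clk_set_parent(mux, new_parent);
	if (clk_prepare_enable(mux))
		pr_warn("example: failed to re-enable mux\n");

	return ret;
}
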
1852/**
Mike Turquettee59c5372014-02-18 21:21:25 -08001853 * clk_set_phase - adjust the phase shift of a clock signal
1854 * @clk: clock signal source
1855 * @degrees: number of degrees the signal is shifted
1856 *
1857 * Shifts the phase of a clock signal by the specified
1858 * degrees. Returns 0 on success, -EERROR otherwise.
1859 *
1860 * This function makes no distinction about the input or reference
1861 * signal that we adjust the clock signal phase against. For example
1862 * phase locked-loop clock signal generators we may shift phase with
1863 * respect to feedback clock signal input, but for other cases the
1864 * clock phase may be shifted with respect to some other, unspecified
1865 * signal.
1866 *
1867 * Additionally the concept of phase shift does not propagate through
1868 * the clock tree hierarchy, which sets it apart from clock rates and
1869 * clock accuracy. A parent clock phase attribute does not have an
1870 * impact on the phase attribute of a child clock.
1871 */
1872int clk_set_phase(struct clk *clk, int degrees)
1873{
Stephen Boyd08b95752015-02-02 14:09:43 -08001874 int ret = -EINVAL;
Mike Turquettee59c5372014-02-18 21:21:25 -08001875
1876 if (!clk)
Stephen Boyd08b95752015-02-02 14:09:43 -08001877 return 0;
Mike Turquettee59c5372014-02-18 21:21:25 -08001878
1879 /* sanity check degrees */
1880 degrees %= 360;
1881 if (degrees < 0)
1882 degrees += 360;
1883
1884 clk_prepare_lock();
1885
Stephen Boyddfc202e2015-02-02 14:37:41 -08001886 trace_clk_set_phase(clk->core, degrees);
1887
Stephen Boyd08b95752015-02-02 14:09:43 -08001888 if (clk->core->ops->set_phase)
1889 ret = clk->core->ops->set_phase(clk->core->hw, degrees);
Mike Turquettee59c5372014-02-18 21:21:25 -08001890
Stephen Boyddfc202e2015-02-02 14:37:41 -08001891 trace_clk_set_phase_complete(clk->core, degrees);
1892
Mike Turquettee59c5372014-02-18 21:21:25 -08001893 if (!ret)
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001894 clk->core->phase = degrees;
Mike Turquettee59c5372014-02-18 21:21:25 -08001895
Mike Turquettee59c5372014-02-18 21:21:25 -08001896 clk_prepare_unlock();
1897
Mike Turquettee59c5372014-02-18 21:21:25 -08001898 return ret;
1899}
Maxime Ripard9767b042015-01-20 22:23:43 +01001900EXPORT_SYMBOL_GPL(clk_set_phase);
Mike Turquettee59c5372014-02-18 21:21:25 -08001901
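/*
 * Illustrative sketch: shifting a hypothetical sampling clock into
 * quadrature and reading the result back. clk_set_phase() returns
 * -EINVAL when the underlying clock lacks a .set_phase callback.
 */
static int __maybe_unused example_quadrature_shift(struct clk *clk)
{
	int ret;

	ret = clk_set_phase(clk, 90);
	if (ret)
		return ret;

	pr_debug("example: phase is now %d degrees\n", clk_get_phase(clk));

	return 0;
}
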
Stephen Boydd6968fc2015-04-30 13:54:13 -07001902static int clk_core_get_phase(struct clk_core *core)
Mike Turquettee59c5372014-02-18 21:21:25 -08001903{
Stephen Boyd1f3e1982015-04-30 14:21:56 -07001904 int ret;
Mike Turquettee59c5372014-02-18 21:21:25 -08001905
1906 clk_prepare_lock();
Stephen Boydd6968fc2015-04-30 13:54:13 -07001907 ret = core->phase;
Mike Turquettee59c5372014-02-18 21:21:25 -08001908 clk_prepare_unlock();
1909
Mike Turquettee59c5372014-02-18 21:21:25 -08001910 return ret;
1911}
1912
1913/**
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001914 * clk_get_phase - return the phase shift of a clock signal
1915 * @clk: clock signal source
1916 *
1917 * Returns the phase shift of a clock node in degrees, otherwise returns
1918 * -EERROR.
1919 */
1920int clk_get_phase(struct clk *clk)
1921{
1922 if (!clk)
1923 return 0;
1924
1925 return clk_core_get_phase(clk->core);
1926}
Stephen Boyd4dff95d2015-04-30 14:43:22 -07001927EXPORT_SYMBOL_GPL(clk_get_phase);
Mike Turquetteb24764902012-03-15 23:11:19 -07001928
1929/**
Michael Turquette3d3801e2015-02-25 09:11:01 -08001930 * clk_is_match - check if two clk's point to the same hardware clock
1931 * @p: clk compared against q
1932 * @q: clk compared against p
1933 *
1934 * Returns true if the two struct clk pointers both point to the same hardware
1935 * clock node. Put differently, returns true if struct clk *p and struct clk *q
1936 * share the same struct clk_core object.
1937 *
1938 * Returns false otherwise. Note that two NULL clks are treated as matching.
1939 */
1940bool clk_is_match(const struct clk *p, const struct clk *q)
1941{
1942 /* trivial case: identical struct clk's or both NULL */
1943 if (p == q)
1944 return true;
1945
1946 /* true if clk->core pointers match. Avoid derefing garbage */
1947 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
1948 if (p->core == q->core)
1949 return true;
1950
1951 return false;
1952}
1953EXPORT_SYMBOL_GPL(clk_is_match);
1954
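/*
 * Illustrative sketch: a driver handed two clock handles (say one from
 * DT and one from clkdev) can use clk_is_match() to detect that they
 * refer to the same hardware clock; comparing the struct clk pointers
 * directly would miss that case. Names are hypothetical.
 */
static int __maybe_unused example_enable_pair(struct clk *a, struct clk *b)
{
	int ret;

	ret = clk_prepare_enable(a);
	if (ret)
		return ret;

	/* skip the second enable if both handles share one clk_core */
	if (!clk_is_match(a, b)) {
		ret = clk_prepare_enable(b);
		if (ret)
			clk_disable_unprepare(a);
	}

	return ret;
}
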
Stephen Boyd4dff95d2015-04-30 14:43:22 -07001955/*** debugfs support ***/
1956
1957#ifdef CONFIG_DEBUG_FS
1958#include <linux/debugfs.h>
1959
1960static struct dentry *rootdir;
1961static int inited = 0;
1962static DEFINE_MUTEX(clk_debug_lock);
1963static HLIST_HEAD(clk_debug_list);
1964
1965static struct hlist_head *all_lists[] = {
1966 &clk_root_list,
1967 &clk_orphan_list,
1968 NULL,
1969};
1970
1971static struct hlist_head *orphan_list[] = {
1972 &clk_orphan_list,
1973 NULL,
1974};
1975
1976static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
1977 int level)
1978{
1979 if (!c)
1980 return;
1981
1982 seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
1983 level * 3 + 1, "",
1984 30 - level * 3, c->name,
1985 c->enable_count, c->prepare_count, clk_core_get_rate(c),
1986 clk_core_get_accuracy(c), clk_core_get_phase(c));
1987}
1988
1989static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
1990 int level)
1991{
1992 struct clk_core *child;
1993
1994 if (!c)
1995 return;
1996
1997 clk_summary_show_one(s, c, level);
1998
1999 hlist_for_each_entry(child, &c->children, child_node)
2000 clk_summary_show_subtree(s, child, level + 1);
2001}
2002
2003static int clk_summary_show(struct seq_file *s, void *data)
2004{
2005 struct clk_core *c;
2006 struct hlist_head **lists = (struct hlist_head **)s->private;
2007
2008 seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy   phase\n");
2009 seq_puts(s, "----------------------------------------------------------------------------------------\n");
2010
2011 clk_prepare_lock();
2012
2013 for (; *lists; lists++)
2014 hlist_for_each_entry(c, *lists, child_node)
2015 clk_summary_show_subtree(s, c, 0);
2016
2017 clk_prepare_unlock();
2018
2019 return 0;
2020}
2021
2022
2023static int clk_summary_open(struct inode *inode, struct file *file)
2024{
2025 return single_open(file, clk_summary_show, inode->i_private);
2026}
2027
2028static const struct file_operations clk_summary_fops = {
2029 .open = clk_summary_open,
2030 .read = seq_read,
2031 .llseek = seq_lseek,
2032 .release = single_release,
2033};
2034
2035static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
2036{
2037 if (!c)
2038 return;
2039
Stefan Wahren7cb81132015-04-29 16:36:43 +00002040 /* This should be JSON format, i.e. elements separated with a comma */
Stephen Boyd4dff95d2015-04-30 14:43:22 -07002041 seq_printf(s, "\"%s\": { ", c->name);
2042 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
2043 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
Stefan Wahren7cb81132015-04-29 16:36:43 +00002044 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
2045 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
Stephen Boyd4dff95d2015-04-30 14:43:22 -07002046 seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
2047}
2048
2049static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
2050{
2051 struct clk_core *child;
2052
2053 if (!c)
2054 return;
2055
2056 clk_dump_one(s, c, level);
2057
2058 hlist_for_each_entry(child, &c->children, child_node) {
2059 seq_printf(s, ",");
2060 clk_dump_subtree(s, child, level + 1);
2061 }
2062
2063 seq_printf(s, "}");
2064}
2065
2066static int clk_dump(struct seq_file *s, void *data)
2067{
2068 struct clk_core *c;
2069 bool first_node = true;
2070 struct hlist_head **lists = (struct hlist_head **)s->private;
2071
2072 seq_printf(s, "{");
2073
2074 clk_prepare_lock();
2075
2076 for (; *lists; lists++) {
2077 hlist_for_each_entry(c, *lists, child_node) {
2078 if (!first_node)
2079 seq_puts(s, ",");
2080 first_node = false;
2081 clk_dump_subtree(s, c, 0);
2082 }
2083 }
2084
2085 clk_prepare_unlock();
2086
Felipe Balbi70e9f4d2015-05-01 09:48:37 -05002087 seq_puts(s, "}\n");
Stephen Boyd4dff95d2015-04-30 14:43:22 -07002088 return 0;
2089}
2090
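/*
 * For illustration only, the clk_dump output has roughly this shape
 * (clock names and figures below are hypothetical):
 *
 *	{"osc": { "enable_count": 1,"prepare_count": 1,"rate": 24000000,
 *	"accuracy": 0,"phase": 0,"pll1": { ... }}}
 */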
2091
2092static int clk_dump_open(struct inode *inode, struct file *file)
2093{
2094 return single_open(file, clk_dump, inode->i_private);
2095}
2096
2097static const struct file_operations clk_dump_fops = {
2098 .open = clk_dump_open,
2099 .read = seq_read,
2100 .llseek = seq_lseek,
2101 .release = single_release,
2102};
2103
2104static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
2105{
2106 struct dentry *d;
2107 int ret = -ENOMEM;
2108
2109 if (!core || !pdentry) {
2110 ret = -EINVAL;
2111 goto out;
2112 }
2113
2114 d = debugfs_create_dir(core->name, pdentry);
2115 if (!d)
2116 goto out;
2117
2118 core->dentry = d;
2119
2120 d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry,
2121 (u32 *)&core->rate);
2122 if (!d)
2123 goto err_out;
2124
2125 d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry,
2126 (u32 *)&core->accuracy);
2127 if (!d)
2128 goto err_out;
2129
2130 d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry,
2131 (u32 *)&core->phase);
2132 if (!d)
2133 goto err_out;
2134
2135 d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry,
2136 (u32 *)&core->flags);
2137 if (!d)
2138 goto err_out;
2139
2140 d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry,
2141 (u32 *)&core->prepare_count);
2142 if (!d)
2143 goto err_out;
2144
2145 d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry,
2146 (u32 *)&core->enable_count);
2147 if (!d)
2148 goto err_out;
2149
2150 d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry,
2151 (u32 *)&core->notifier_count);
2152 if (!d)
2153 goto err_out;
2154
2155 if (core->ops->debug_init) {
2156 ret = core->ops->debug_init(core->hw, core->dentry);
2157 if (ret)
2158 goto err_out;
2159 }
2160
2161 ret = 0;
2162 goto out;
2163
2164err_out:
2165 debugfs_remove_recursive(core->dentry);
2166 core->dentry = NULL;
2167out:
2168 return ret;
2169}
2170
2171/**
Stephen Boyd6e5ab412015-04-30 15:11:31 -07002172 * clk_debug_register - add a clk node to the debugfs clk directory
2173 * @core: the clk being added to the debugfs clk directory
Stephen Boyd4dff95d2015-04-30 14:43:22 -07002174 *
Stephen Boyd6e5ab412015-04-30 15:11:31 -07002175 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
2176 * initialized. Otherwise it bails out early since the debugfs clk directory
Stephen Boyd4dff95d2015-04-30 14:43:22 -07002177 * will be created lazily by clk_debug_init as part of a late_initcall.
2178 */
2179static int clk_debug_register(struct clk_core *core)
2180{
2181 int ret = 0;
2182
2183 mutex_lock(&clk_debug_lock);
2184 hlist_add_head(&core->debug_node, &clk_debug_list);
2185
2186 if (!inited)
2187 goto unlock;
2188
2189 ret = clk_debug_create_one(core, rootdir);
2190unlock:
2191 mutex_unlock(&clk_debug_lock);
2192
2193 return ret;
2194}
2195
2196/**
Stephen Boyd6e5ab412015-04-30 15:11:31 -07002197 * clk_debug_unregister - remove a clk node from the debugfs clk directory
2198 * @core: the clk being removed from the debugfs clk directory
Stephen Boyd4dff95d2015-04-30 14:43:22 -07002199 *
Stephen Boyd6e5ab412015-04-30 15:11:31 -07002200 * Dynamically removes a clk and all its child nodes from the
2201 * debugfs clk directory if clk->dentry points to debugfs created by
Stephen Boyd4dff95d2015-04-30 14:43:22 -07002202 * clk_debug_register in __clk_init.
2203 */
2204static void clk_debug_unregister(struct clk_core *core)
2205{
2206 mutex_lock(&clk_debug_lock);
2207 hlist_del_init(&core->debug_node);
2208 debugfs_remove_recursive(core->dentry);
2209 core->dentry = NULL;
2210 mutex_unlock(&clk_debug_lock);
2211}
2212
2213struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
2214 void *data, const struct file_operations *fops)
2215{
2216 struct dentry *d = NULL;
2217
2218 if (hw->core->dentry)
2219 d = debugfs_create_file(name, mode, hw->core->dentry, data,
2220 fops);
2221
2222 return d;
2223}
2224EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
2225
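/*
 * Illustrative sketch: how a provider might hang an extra file off a
 * clk's debugfs directory with clk_debugfs_add_file(). The name
 * "clk_example" and the fops below are hypothetical.
 */
static int clk_example_show(struct seq_file *s, void *data)
{
	seq_puts(s, "example\n");
	return 0;
}

static int clk_example_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_example_show, inode->i_private);
}

static const struct file_operations clk_example_fops __maybe_unused = {
	.open		= clk_example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* a provider would call clk_debugfs_add_file(hw, "clk_example", S_IRUGO, NULL, &clk_example_fops) */
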
2226/**
Stephen Boyd6e5ab412015-04-30 15:11:31 -07002227 * clk_debug_init - lazily populate the debugfs clk directory
Stephen Boyd4dff95d2015-04-30 14:43:22 -07002228 *
Stephen Boyd6e5ab412015-04-30 15:11:31 -07002229 * clks are often initialized very early during boot before memory can be
2230 * dynamically allocated and well before debugfs is setup. This function
2231 * populates the debugfs clk directory once at boot-time when we know that
2232 * debugfs is setup. It should only be called once at boot-time, all other clks
2233 * added dynamically will be done so with clk_debug_register.
Stephen Boyd4dff95d2015-04-30 14:43:22 -07002234 */
2235static int __init clk_debug_init(void)
2236{
2237 struct clk_core *core;
2238 struct dentry *d;
2239
2240 rootdir = debugfs_create_dir("clk", NULL);
2241
2242 if (!rootdir)
2243 return -ENOMEM;
2244
2245 d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
2246 &clk_summary_fops);
2247 if (!d)
2248 return -ENOMEM;
2249
2250 d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
2251 &clk_dump_fops);
2252 if (!d)
2253 return -ENOMEM;
2254
2255 d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
2256 &orphan_list, &clk_summary_fops);
2257 if (!d)
2258 return -ENOMEM;
2259
2260 d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
2261 &orphan_list, &clk_dump_fops);
2262 if (!d)
2263 return -ENOMEM;
2264
2265 mutex_lock(&clk_debug_lock);
2266 hlist_for_each_entry(core, &clk_debug_list, debug_node)
2267 clk_debug_create_one(core, rootdir);
2268
2269 inited = 1;
2270 mutex_unlock(&clk_debug_lock);
2271
2272 return 0;
2273}
2274late_initcall(clk_debug_init);
2275#else
2276static inline int clk_debug_register(struct clk_core *core) { return 0; }
2277static inline void clk_debug_reparent(struct clk_core *core,
2278 struct clk_core *new_parent)
2279{
2280}
2281static inline void clk_debug_unregister(struct clk_core *core)
2282{
2283}
2284#endif
2285
Michael Turquette3d3801e2015-02-25 09:11:01 -08002286/**
Mike Turquetteb24764902012-03-15 23:11:19 -07002287 * __clk_init - initialize the data structures in a struct clk
2288 * @dev: device initializing this clk, placeholder for now
2289 * @clk_user: clk being initialized
2290 *
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002291 * Initializes the lists in struct clk_core, queries the hardware for the
Mike Turquetteb24764902012-03-15 23:11:19 -07002292 * parent and rate and sets them both.
Mike Turquetteb24764902012-03-15 23:11:19 -07002293 */
Michael Turquetteb09d6d92015-01-29 14:22:50 -08002294static int __clk_init(struct device *dev, struct clk *clk_user)
Mike Turquetteb24764902012-03-15 23:11:19 -07002295{
Mike Turquetted1302a32012-03-29 14:30:40 -07002296 int i, ret = 0;
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002297 struct clk_core *orphan;
Sasha Levinb67bfe02013-02-27 17:06:00 -08002298 struct hlist_node *tmp2;
Stephen Boydd6968fc2015-04-30 13:54:13 -07002299 struct clk_core *core;
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +01002300 unsigned long rate;
Mike Turquetteb24764902012-03-15 23:11:19 -07002301
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002302 if (!clk_user)
Mike Turquetted1302a32012-03-29 14:30:40 -07002303 return -EINVAL;
Mike Turquetteb24764902012-03-15 23:11:19 -07002304
Stephen Boydd6968fc2015-04-30 13:54:13 -07002305 core = clk_user->core;
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002306
Mike Turquetteeab89f62013-03-28 13:59:01 -07002307 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07002308
2309 /* check to see if a clock with this name is already registered */
Stephen Boydd6968fc2015-04-30 13:54:13 -07002310 if (clk_core_lookup(core->name)) {
Mike Turquetted1302a32012-03-29 14:30:40 -07002311 pr_debug("%s: clk %s already initialized\n",
Stephen Boydd6968fc2015-04-30 13:54:13 -07002312 __func__, core->name);
Mike Turquetted1302a32012-03-29 14:30:40 -07002313 ret = -EEXIST;
Mike Turquetteb24764902012-03-15 23:11:19 -07002314 goto out;
Mike Turquetted1302a32012-03-29 14:30:40 -07002315 }
Mike Turquetteb24764902012-03-15 23:11:19 -07002316
Mike Turquetted4d7e3d2012-03-26 16:15:52 -07002317 /* check that clk_ops are sane. See Documentation/clk.txt */
Stephen Boydd6968fc2015-04-30 13:54:13 -07002318 if (core->ops->set_rate &&
2319 !((core->ops->round_rate || core->ops->determine_rate) &&
2320 core->ops->recalc_rate)) {
James Hogan71472c02013-07-29 12:25:00 +01002321 pr_warn("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
Stephen Boydd6968fc2015-04-30 13:54:13 -07002322 __func__, core->name);
Mike Turquetted1302a32012-03-29 14:30:40 -07002323 ret = -EINVAL;
Mike Turquetted4d7e3d2012-03-26 16:15:52 -07002324 goto out;
2325 }
2326
Stephen Boydd6968fc2015-04-30 13:54:13 -07002327 if (core->ops->set_parent && !core->ops->get_parent) {
Mike Turquetted4d7e3d2012-03-26 16:15:52 -07002328 pr_warn("%s: %s must implement .get_parent & .set_parent\n",
Stephen Boydd6968fc2015-04-30 13:54:13 -07002329 __func__, core->name);
Mike Turquetted1302a32012-03-29 14:30:40 -07002330 ret = -EINVAL;
Mike Turquetted4d7e3d2012-03-26 16:15:52 -07002331 goto out;
2332 }
2333
Stephen Boydd6968fc2015-04-30 13:54:13 -07002334 if (core->ops->set_rate_and_parent &&
2335 !(core->ops->set_parent && core->ops->set_rate)) {
Stephen Boyd3fa22522014-01-15 10:47:22 -08002336 pr_warn("%s: %s must implement .set_parent & .set_rate\n",
Stephen Boydd6968fc2015-04-30 13:54:13 -07002337 __func__, core->name);
Stephen Boyd3fa22522014-01-15 10:47:22 -08002338 ret = -EINVAL;
2339 goto out;
2340 }
2341
Mike Turquetteb24764902012-03-15 23:11:19 -07002342 /* throw a WARN if any entries in parent_names are NULL */
Stephen Boydd6968fc2015-04-30 13:54:13 -07002343 for (i = 0; i < core->num_parents; i++)
2344 WARN(!core->parent_names[i],
Mike Turquetteb24764902012-03-15 23:11:19 -07002345 "%s: invalid NULL in %s's .parent_names\n",
Stephen Boydd6968fc2015-04-30 13:54:13 -07002346 __func__, core->name);
Mike Turquetteb24764902012-03-15 23:11:19 -07002347
2348 /*
2349 * Allocate an array of struct clk *'s to avoid unnecessary string
2350 * look-ups of clk's possible parents. This can fail for clocks passed
Stephen Boydd6968fc2015-04-30 13:54:13 -07002351 * in to clk_init during early boot; thus any access to core->parents[]
Mike Turquetteb24764902012-03-15 23:11:19 -07002352 * must always check for a NULL pointer and try to populate it if
2353 * necessary.
2354 *
Stephen Boydd6968fc2015-04-30 13:54:13 -07002355 * If core->parents is not NULL we skip this entire block. This allows
2356 * for clock drivers to statically initialize core->parents.
Mike Turquetteb24764902012-03-15 23:11:19 -07002357 */
Stephen Boydd6968fc2015-04-30 13:54:13 -07002358 if (core->num_parents > 1 && !core->parents) {
2359 core->parents = kcalloc(core->num_parents, sizeof(struct clk *),
Tomasz Figa96a7ed92013-09-29 02:37:15 +02002360 GFP_KERNEL);
Mike Turquetteb24764902012-03-15 23:11:19 -07002361 /*
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01002362 * clk_core_lookup returns NULL for parents that have not been
Mike Turquetteb24764902012-03-15 23:11:19 -07002363 * clk_init'd; thus any access to clk->parents[] must check
2364 * for a NULL pointer. We can always perform lazy lookups for
2365 * missing parents later on.
2366 */
Stephen Boydd6968fc2015-04-30 13:54:13 -07002367 if (core->parents)
2368 for (i = 0; i < core->num_parents; i++)
2369 core->parents[i] =
2370 clk_core_lookup(core->parent_names[i]);
Mike Turquetteb24764902012-03-15 23:11:19 -07002371 }
2372
Stephen Boydd6968fc2015-04-30 13:54:13 -07002373 core->parent = __clk_init_parent(core);
Mike Turquetteb24764902012-03-15 23:11:19 -07002374
2375 /*
Stephen Boydd6968fc2015-04-30 13:54:13 -07002376 * Populate core->parent if parent has already been __clk_init'd. If
Mike Turquetteb24764902012-03-15 23:11:19 -07002377 * parent has not yet been __clk_init'd then place clk in the orphan
2378 * list. If clk has set the CLK_IS_ROOT flag then place it in the root
2379 * clk list.
2380 *
2381 * Every time a new clk is clk_init'd then we walk the list of orphan
2382 * clocks and re-parent any that are children of the clock currently
2383 * being clk_init'd.
2384 */
Heiko Stuebnere6500342015-04-22 22:53:05 +02002385 if (core->parent) {
Stephen Boydd6968fc2015-04-30 13:54:13 -07002386 hlist_add_head(&core->child_node,
2387 &core->parent->children);
Heiko Stuebnere6500342015-04-22 22:53:05 +02002388 core->orphan = core->parent->orphan;
2389 } else if (core->flags & CLK_IS_ROOT) {
Stephen Boydd6968fc2015-04-30 13:54:13 -07002390 hlist_add_head(&core->child_node, &clk_root_list);
Heiko Stuebnere6500342015-04-22 22:53:05 +02002391 core->orphan = false;
2392 } else {
Stephen Boydd6968fc2015-04-30 13:54:13 -07002393 hlist_add_head(&core->child_node, &clk_orphan_list);
Heiko Stuebnere6500342015-04-22 22:53:05 +02002394 core->orphan = true;
2395 }
Mike Turquetteb24764902012-03-15 23:11:19 -07002396
2397 /*
Boris BREZILLON5279fc42013-12-21 10:34:47 +01002398 * Set clk's accuracy. The preferred method is to use
2399 * .recalc_accuracy. For simple clocks and lazy developers the default
2400 * fallback is to use the parent's accuracy. If a clock doesn't have a
2401 * parent (or is orphaned) then accuracy is set to zero (perfect
2402 * clock).
2403 */
Stephen Boydd6968fc2015-04-30 13:54:13 -07002404 if (core->ops->recalc_accuracy)
2405 core->accuracy = core->ops->recalc_accuracy(core->hw,
2406 __clk_get_accuracy(core->parent));
2407 else if (core->parent)
2408 core->accuracy = core->parent->accuracy;
Boris BREZILLON5279fc42013-12-21 10:34:47 +01002409 else
Stephen Boydd6968fc2015-04-30 13:54:13 -07002410 core->accuracy = 0;
Boris BREZILLON5279fc42013-12-21 10:34:47 +01002411
2412 /*
Maxime Ripard9824cf72014-07-14 13:53:27 +02002413 * Set clk's phase.
2414 * Since a phase is by definition relative to its parent, just
2415 * query the current clock phase, or just assume it's in phase.
2416 */
Stephen Boydd6968fc2015-04-30 13:54:13 -07002417 if (core->ops->get_phase)
2418 core->phase = core->ops->get_phase(core->hw);
Maxime Ripard9824cf72014-07-14 13:53:27 +02002419 else
Stephen Boydd6968fc2015-04-30 13:54:13 -07002420 core->phase = 0;
Maxime Ripard9824cf72014-07-14 13:53:27 +02002421
2422 /*
Mike Turquetteb24764902012-03-15 23:11:19 -07002423 * Set clk's rate. The preferred method is to use .recalc_rate. For
2424 * simple clocks and lazy developers the default fallback is to use the
2425 * parent's rate. If a clock doesn't have a parent (or is orphaned)
2426 * then rate is set to zero.
2427 */
Stephen Boydd6968fc2015-04-30 13:54:13 -07002428 if (core->ops->recalc_rate)
2429 rate = core->ops->recalc_rate(core->hw,
2430 clk_core_get_rate_nolock(core->parent));
2431 else if (core->parent)
2432 rate = core->parent->rate;
Mike Turquetteb24764902012-03-15 23:11:19 -07002433 else
Tomeu Vizoso1c8e6002015-01-23 12:03:31 +01002434 rate = 0;
Stephen Boydd6968fc2015-04-30 13:54:13 -07002435 core->rate = core->req_rate = rate;
Mike Turquetteb24764902012-03-15 23:11:19 -07002436
2437 /*
2438 * walk the list of orphan clocks and reparent any that are children of
2439 * this clock
2440 */
Sasha Levinb67bfe02013-02-27 17:06:00 -08002441 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
Alex Elder12d298862013-09-05 08:33:24 -05002442 if (orphan->num_parents && orphan->ops->get_parent) {
Martin Fuzzey1f61e5f2012-11-22 20:15:05 +01002443 i = orphan->ops->get_parent(orphan->hw);
Stephen Boydd6968fc2015-04-30 13:54:13 -07002444 if (!strcmp(core->name, orphan->parent_names[i]))
2445 clk_core_reparent(orphan, core);
Martin Fuzzey1f61e5f2012-11-22 20:15:05 +01002446 continue;
2447 }
2448
Mike Turquetteb24764902012-03-15 23:11:19 -07002449 for (i = 0; i < orphan->num_parents; i++)
Stephen Boydd6968fc2015-04-30 13:54:13 -07002450 if (!strcmp(core->name, orphan->parent_names[i])) {
2451 clk_core_reparent(orphan, core);
Mike Turquetteb24764902012-03-15 23:11:19 -07002452 break;
2453 }
Martin Fuzzey1f61e5f2012-11-22 20:15:05 +01002454 }
Mike Turquetteb24764902012-03-15 23:11:19 -07002455
2456 /*
2457 * optional platform-specific magic
2458 *
2459 * The .init callback is not used by any of the basic clock types, but
2460 * exists for weird hardware that must perform initialization magic.
2461 * Please consider other ways of solving initialization problems before
Peter Meerwald24ee1a02013-06-29 15:14:19 +02002462 * using this callback, as its use is discouraged.
Mike Turquetteb24764902012-03-15 23:11:19 -07002463 */
Stephen Boydd6968fc2015-04-30 13:54:13 -07002464 if (core->ops->init)
2465 core->ops->init(core->hw);
Mike Turquetteb24764902012-03-15 23:11:19 -07002466
Stephen Boydd6968fc2015-04-30 13:54:13 -07002467 kref_init(&core->ref);
Mike Turquetteb24764902012-03-15 23:11:19 -07002468out:
Mike Turquetteeab89f62013-03-28 13:59:01 -07002469 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07002470
Stephen Boyd89f7e9d2014-12-12 15:04:16 -08002471 if (!ret)
Stephen Boydd6968fc2015-04-30 13:54:13 -07002472 clk_debug_register(core);
Stephen Boyd89f7e9d2014-12-12 15:04:16 -08002473
Mike Turquetted1302a32012-03-29 14:30:40 -07002474 return ret;
Mike Turquetteb24764902012-03-15 23:11:19 -07002475}
2476
struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
			     const char *con_id)
{
	struct clk *clk;

	/* This is to allow this function to be chained to others */
	if (!hw || IS_ERR(hw))
		return (struct clk *) hw;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->core = hw->core;
	clk->dev_id = dev_id;
	clk->con_id = con_id;
	clk->max_rate = ULONG_MAX;

	clk_prepare_lock();
	hlist_add_head(&clk->clks_node, &hw->core->clks);
	clk_prepare_unlock();

	return clk;
}

void __clk_free_clk(struct clk *clk)
{
	clk_prepare_lock();
	hlist_del(&clk->clks_node);
	clk_prepare_unlock();

	kfree(clk);
}

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with new
 * clock nodes. It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
 * rest of the clock API. In the event of an error clk_register will return an
 * error code; drivers must test for an error code after calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	int i, ret;
	struct clk_core *core;

	core = kzalloc(sizeof(*core), GFP_KERNEL);
	if (!core) {
		ret = -ENOMEM;
		goto fail_out;
	}

	core->name = kstrdup_const(hw->init->name, GFP_KERNEL);
	if (!core->name) {
		ret = -ENOMEM;
		goto fail_name;
	}
	core->ops = hw->init->ops;
	if (dev && dev->driver)
		core->owner = dev->driver->owner;
	core->hw = hw;
	core->flags = hw->init->flags;
	core->num_parents = hw->init->num_parents;
	core->min_rate = 0;
	core->max_rate = ULONG_MAX;
	hw->core = core;

	/* allocate local copy in case parent_names is __initdata */
	core->parent_names = kcalloc(core->num_parents, sizeof(char *),
					GFP_KERNEL);

	if (!core->parent_names) {
		ret = -ENOMEM;
		goto fail_parent_names;
	}

	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < core->num_parents; i++) {
		core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
						GFP_KERNEL);
		if (!core->parent_names[i]) {
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	INIT_HLIST_HEAD(&core->clks);

	hw->clk = __clk_create_clk(hw, NULL, NULL);
	if (IS_ERR(hw->clk)) {
		ret = PTR_ERR(hw->clk);
		goto fail_parent_names_copy;
	}

	ret = __clk_init(dev, hw->clk);
	if (!ret)
		return hw->clk;

	__clk_free_clk(hw->clk);
	hw->clk = NULL;

fail_parent_names_copy:
	while (--i >= 0)
		kfree_const(core->parent_names[i]);
	kfree(core->parent_names);
fail_parent_names:
	kfree_const(core->name);
fail_name:
	kfree(core);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);

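/*
 * Illustrative sketch (not part of this file): a clock provider driver
 * would typically embed a struct clk_hw in its own state, point hw.init
 * at a struct clk_init_data and call clk_register(). The foo_* names
 * and the "foo"/"foo_parent" strings below are hypothetical.
 *
 *	struct foo_clk {
 *		struct clk_hw hw;
 *		void __iomem *base;
 *	};
 *
 *	static unsigned long foo_recalc_rate(struct clk_hw *hw,
 *					     unsigned long parent_rate)
 *	{
 *		return parent_rate / 2;
 *	}
 *
 *	static const struct clk_ops foo_clk_ops = {
 *		.recalc_rate	= foo_recalc_rate,
 *	};
 *
 *	static struct clk *foo_clk_register(struct device *dev)
 *	{
 *		static const char *parents[] = { "foo_parent" };
 *		struct clk_init_data init = {
 *			.name = "foo",
 *			.ops = &foo_clk_ops,
 *			.parent_names = parents,
 *			.num_parents = ARRAY_SIZE(parents),
 *		};
 *		struct foo_clk *foo;
 *
 *		foo = devm_kzalloc(dev, sizeof(*foo), GFP_KERNEL);
 *		if (!foo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		foo->hw.init = &init;
 *		return clk_register(dev, &foo->hw);
 *	}
 */
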
/* Free memory allocated for a clock. */
static void __clk_release(struct kref *ref)
{
	struct clk_core *core = container_of(ref, struct clk_core, ref);
	int i = core->num_parents;

	lockdep_assert_held(&prepare_lock);

	kfree(core->parents);
	while (--i >= 0)
		kfree_const(core->parent_names[i]);

	kfree(core->parent_names);
	kfree_const(core->name);
	kfree(core);
}

/*
 * Empty clk_ops for unregistered clocks. These are used temporarily
 * after clk_unregister() was called on a clock and until last clock
 * consumer calls clk_put() and the struct clk object is freed.
 */
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
	return -ENXIO;
}

static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
	WARN_ON_ONCE(1);
}

static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	return -ENXIO;
}

static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
	return -ENXIO;
}

static const struct clk_ops clk_nodrv_ops = {
	.enable		= clk_nodrv_prepare_enable,
	.disable	= clk_nodrv_disable_unprepare,
	.prepare	= clk_nodrv_prepare_enable,
	.unprepare	= clk_nodrv_disable_unprepare,
	.set_rate	= clk_nodrv_set_rate,
	.set_parent	= clk_nodrv_set_parent,
};

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 */
void clk_unregister(struct clk *clk)
{
	unsigned long flags;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_debug_unregister(clk->core);

	clk_prepare_lock();

	if (clk->core->ops == &clk_nodrv_ops) {
		pr_err("%s: unregistered clock: %s\n", __func__,
		       clk->core->name);
		/* don't return with prepare_lock still held */
		goto unlock;
	}
	/*
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
	flags = clk_enable_lock();
	clk->core->ops = &clk_nodrv_ops;
	clk_enable_unlock(flags);

	if (!hlist_empty(&clk->core->children)) {
		struct clk_core *child;
		struct hlist_node *t;

		/* Reparent all children to the orphan list. */
		hlist_for_each_entry_safe(child, t, &clk->core->children,
					  child_node)
			clk_core_set_parent(child, NULL);
	}

	hlist_del_init(&clk->core->child_node);

	if (clk->core->prepare_count)
		pr_warn("%s: unregistering prepared clock: %s\n",
			__func__, clk->core->name);
	kref_put(&clk->core->ref, __clk_release);
unlock:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);

static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(*(struct clk **)res);
}

/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register(). Clocks returned from this function are
 * automatically clk_unregister()ed on driver detach. See clk_register() for
 * more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;
	struct clk **clkp;

	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
	if (!clkp)
		return ERR_PTR(-ENOMEM);

	clk = clk_register(dev, hw);
	if (!IS_ERR(clk)) {
		*clkp = clk;
		devres_add(dev, clkp);
	} else {
		devres_free(clkp);
	}

	return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);

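/*
 * Illustrative sketch (hypothetical driver code): with the managed
 * variant the clock is unregistered automatically on driver detach, so
 * probe() error paths need no explicit clk_unregister(). "foo" is the
 * made-up provider from the sketch above.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct clk *clk;
 *
 *		clk = devm_clk_register(&pdev->dev, &foo->hw);
 *		if (IS_ERR(clk))
 *			return PTR_ERR(clk);
 *
 *		return of_clk_add_provider(pdev->dev.of_node,
 *					   of_clk_src_simple_get, clk);
 *	}
 */
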
static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk *c = res;

	if (WARN_ON(!c))
		return 0;
	return c == data;
}

/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device that registered the clock
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);

/*
 * clkdev helpers
 */
int __clk_get(struct clk *clk)
{
	struct clk_core *core = !clk ? NULL : clk->core;

	if (core) {
		if (!try_module_get(core->owner))
			return 0;

		kref_get(&core->ref);
	}
	return 1;
}

void __clk_put(struct clk *clk)
{
	struct module *owner;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();

	hlist_del(&clk->clks_node);
	if (clk->min_rate > clk->core->req_rate ||
	    clk->max_rate < clk->core->req_rate)
		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);

	owner = clk->core->owner;
	kref_put(&clk->core->ref, __clk_release);

	clk_prepare_unlock();

	module_put(owner);

	kfree(clk);
}

/*** clk rate change notifiers ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes. This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon. The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * this would result in a nested acquisition of the prepare_lock mutex.
 *
 * In all notification cases (pre, post and abort rate change) the
 * original clock rate is passed to the callback via struct
 * clk_notifier_data.old_rate and the new frequency is passed via struct
 * clk_notifier_data.new_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->core->notifier_count++;

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);

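/*
 * Illustrative sketch (hypothetical consumer code): a notifier callback
 * receives a struct clk_notifier_data and one of the PRE_RATE_CHANGE,
 * POST_RATE_CHANGE or ABORT_RATE_CHANGE events. The foo_* names are
 * made up.
 *
 *	static int foo_rate_cb(struct notifier_block *nb,
 *			       unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (event == PRE_RATE_CHANGE)
 *			pr_debug("foo: rate %lu -> %lu\n",
 *				 cnd->old_rate, cnd->new_rate);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_rate_cb,
 *	};
 *
 *	...
 *	ret = clk_notifier_register(clk, &foo_nb);
 */
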
/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and free the
 * memory allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->core->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			list_del(&cn->node);
			kfree(cn);
		}

	} else {
		ret = -ENOENT;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);

#ifdef CONFIG_OF
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback. Returns NULL or a struct clk for the
 * given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	void *data;
};

static const struct of_device_id __clk_of_table_sentinel
	__used __section(__clk_of_table_end);

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);

struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
				  void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);

/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clock from %s\n", np->full_name);

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);

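/*
 * Illustrative sketch (hypothetical provider code): a driver exposing
 * several clocks usually fills a struct clk_onecell_data and registers
 * of_clk_src_onecell_get as the @get callback; a single-clock provider
 * can instead pass its struct clk with of_clk_src_simple_get. The
 * foo_* names and FOO_NR_CLKS are made up.
 *
 *	static struct clk *foo_clks[FOO_NR_CLKS];
 *	static struct clk_onecell_data foo_clk_data = {
 *		.clks = foo_clks,
 *		.clk_num = ARRAY_SIZE(foo_clks),
 *	};
 *
 *	...
 *	ret = of_clk_add_provider(np, of_clk_src_onecell_get, &foo_clk_data);
 */
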
/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
				       const char *dev_id, const char *con_id)
{
	struct of_clk_provider *provider;
	struct clk *clk = ERR_PTR(-EPROBE_DEFER);

	if (!clkspec)
		return ERR_PTR(-EINVAL);

	/* Check if we have such a provider in our array */
	mutex_lock(&of_clk_mutex);
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np)
			clk = provider->get(clkspec, provider->data);
		if (!IS_ERR(clk)) {
			clk = __clk_create_clk(__clk_get_hw(clk), dev_id,
					       con_id);

			if (!IS_ERR(clk) && !__clk_get(clk)) {
				__clk_free_clk(clk);
				clk = ERR_PTR(-ENOENT);
			}

			break;
		}
	}
	mutex_unlock(&of_clk_mutex);

	return clk;
}

/**
 * of_clk_get_from_provider() - Lookup a clock from a clock provider
 * @clkspec: pointer to a clock specifier data structure
 *
 * This function looks up a struct clk from the registered list of clock
 * providers; the input is a clock specifier data structure as returned
 * by of_parse_phandle_with_args().
 */
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return __of_clk_get_from_provider(clkspec, NULL, __func__);
}

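/*
 * Illustrative sketch (hypothetical caller): resolving the first entry
 * of a node's "clocks" property by hand. of_clk_get() performs roughly
 * this same sequence on behalf of consumers.
 *
 *	struct of_phandle_args clkspec;
 *	struct clk *clk;
 *	int rc;
 *
 *	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
 *					&clkspec);
 *	if (rc)
 *		return rc;
 *
 *	clk = of_clk_get_from_provider(&clkspec);
 *	of_node_put(clkspec.np);
 */
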
int of_clk_get_parent_count(struct device_node *np)
{
	return of_count_phandle_with_args(np, "clocks", "#clock-cells");
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);

const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	struct property *prop;
	const char *clk_name;
	const __be32 *vp;
	u32 pv;
	int rc;
	int count;

	if (index < 0)
		return NULL;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	index = clkspec.args_count ? clkspec.args[0] : 0;
	count = 0;

	/*
	 * if there is an indices property, use it to transfer the index
	 * specified into an array offset for the clock-output-names property.
	 */
	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
		if (index == pv) {
			index = count;
			break;
		}
		count++;
	}

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  index,
					  &clk_name) < 0)
		clk_name = clkspec.np->name;

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);

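/*
 * Illustrative device tree snippet (hypothetical binding): with the
 * provider below, a consumer entry of clocks = <&clock_controller 2>
 * resolves to the output name "bar", because the clock-indices
 * property remaps specifier value 2 to array offset 1 of
 * clock-output-names.
 *
 *	clock_controller: clock-controller {
 *		#clock-cells = <1>;
 *		clock-indices = <1>, <2>;
 *		clock-output-names = "foo", "bar";
 *	};
 */
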
/**
 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
 * number of parents
 * @np: Device node pointer associated with clock provider
 * @parents: pointer to char array that holds the parents' names
 * @size: size of the @parents array
 *
 * Return: number of parents for the clock node.
 */
int of_clk_parent_fill(struct device_node *np, const char **parents,
		       unsigned int size)
{
	unsigned int i = 0;

	while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
		i++;

	return i;
}
EXPORT_SYMBOL_GPL(of_clk_parent_fill);

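/*
 * Illustrative sketch (hypothetical provider init): collecting parent
 * names for a mux-like clock straight from the device tree.
 *
 *	const char *parents[4];
 *	int num_parents;
 *
 *	num_parents = of_clk_parent_fill(np, parents, ARRAY_SIZE(parents));
 */
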
struct clock_provider {
	of_clk_init_cb_t clk_init_cb;
	struct device_node *np;
	struct list_head node;
};

/*
 * This function looks for a parent clock. If there is one, then it
 * checks that the provider for this parent clock was initialized, in
 * which case the parent clock will be ready.
 */
static int parent_ready(struct device_node *np)
{
	int i = 0;

	while (true) {
		struct clk *clk = of_clk_get(np, i);

		/* this parent is ready, we can check the next one */
		if (!IS_ERR(clk)) {
			clk_put(clk);
			i++;
			continue;
		}

		/* at least one parent is not ready, we exit now */
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return 0;

		/*
		 * Here we assume that the device tree is written
		 * correctly, so any other error means that there are
		 * no more parents. As we didn't exit earlier, all the
		 * previous parents are ready. If the clock has no
		 * parents at all, there is nothing to wait for, so
		 * their absence also counts as ready.
		 */
		return 1;
	}
}

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers
 * and calls their initialization functions, following the dependencies
 * between providers where possible.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct clock_provider *clk_provider, *next;
	bool is_init_done;
	bool force = false;
	LIST_HEAD(clk_provider_list);

	if (!matches)
		matches = &__clk_of_table;

	/* First prepare the list of the clock providers */
	for_each_matching_node_and_match(np, matches, &match) {
		struct clock_provider *parent;

		parent = kzalloc(sizeof(*parent), GFP_KERNEL);
		if (!parent) {
			list_for_each_entry_safe(clk_provider, next,
						 &clk_provider_list, node) {
				list_del(&clk_provider->node);
				kfree(clk_provider);
			}
			return;
		}

		parent->clk_init_cb = match->data;
		parent->np = np;
		list_add_tail(&parent->node, &clk_provider_list);
	}

	while (!list_empty(&clk_provider_list)) {
		is_init_done = false;
		list_for_each_entry_safe(clk_provider, next,
					 &clk_provider_list, node) {
			if (force || parent_ready(clk_provider->np)) {
				clk_provider->clk_init_cb(clk_provider->np);
				of_clk_set_defaults(clk_provider->np, true);

				list_del(&clk_provider->node);
				kfree(clk_provider);
				is_init_done = true;
			}
		}

		/*
		 * We didn't manage to initialize any of the
		 * remaining providers during the last loop, so now we
		 * initialize all the remaining ones unconditionally
		 * in case the clock parent was not mandatory
		 */
		if (!is_init_done)
			force = true;
	}
}
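
/*
 * Illustrative sketch (hypothetical provider): a platform clock driver
 * typically hooks into of_clk_init() via CLK_OF_DECLARE(), which places
 * an entry in __clk_of_table. The compatible string and foo_* names
 * below are made up.
 *
 *	static void __init foo_clocks_init(struct device_node *np)
 *	{
 *		// register clocks, then publish them, e.g.:
 *		// of_clk_add_provider(np, of_clk_src_onecell_get,
 *		//		       &foo_clk_data);
 *	}
 *	CLK_OF_DECLARE(foo_clocks, "vendor,foo-clocks", foo_clocks_init);
 */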
#endif