/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk-private.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***           locking             ***/
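
/*
 * Both locks below are reentrant for the task that owns them: if the
 * owner calls clk_prepare_lock()/clk_enable_lock() again, only the
 * matching refcount is bumped; any other task blocks until the owner
 * drops its last reference.
 */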
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt)
		return;
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}

/***        debugfs support        ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;
static int inited = 0;

static struct hlist_head *all_lists[] = {
	&clk_root_list,
	&clk_orphan_list,
	NULL,
};

static struct hlist_head *orphan_list[] = {
	&clk_orphan_list,
	NULL,
};

static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
{
	if (!c)
		return;

	seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
		   level * 3 + 1, "",
		   30 - level * 3, c->name,
		   c->enable_count, c->prepare_count, clk_get_rate(c),
		   clk_get_accuracy(c), clk_get_phase(c));
}

static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
				     int level)
{
	struct clk *child;

	if (!c)
		return;

	clk_summary_show_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node)
		clk_summary_show_subtree(s, child, level + 1);
}

static int clk_summary_show(struct seq_file *s, void *data)
{
	struct clk *c;
	struct hlist_head **lists = (struct hlist_head **)s->private;

	seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy   phase\n");
	seq_puts(s, "----------------------------------------------------------------------------------------\n");

	clk_prepare_lock();

	for (; *lists; lists++)
		hlist_for_each_entry(c, *lists, child_node)
			clk_summary_show_subtree(s, c, 0);

	clk_prepare_unlock();

	return 0;
}


static int clk_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_summary_show, inode->i_private);
}

static const struct file_operations clk_summary_fops = {
	.open		= clk_summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
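
/*
 * Example: with debugfs mounted, the summary can be read from
 * userspace (the output below is illustrative, not captured):
 *
 *	# cat /sys/kernel/debug/clk/clk_summary
 *	   clock        enable_cnt  prepare_cnt      rate   accuracy  phase
 *	   osc                   1            1  24000000          0      0
 *	      pll                1            1 800000000          0      0
 */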

static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
{
	if (!c)
		return;

	seq_printf(s, "\"%s\": { ", c->name);
	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
	seq_printf(s, "\"rate\": %lu,", clk_get_rate(c));
	seq_printf(s, "\"accuracy\": %lu,", clk_get_accuracy(c));
	seq_printf(s, "\"phase\": %d", clk_get_phase(c));
}

static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
{
	struct clk *child;

	if (!c)
		return;

	clk_dump_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node) {
		seq_printf(s, ",");
		clk_dump_subtree(s, child, level + 1);
	}

	seq_printf(s, "}");
}

static int clk_dump(struct seq_file *s, void *data)
{
	struct clk *c;
	bool first_node = true;
	struct hlist_head **lists = (struct hlist_head **)s->private;

	seq_printf(s, "{");

	clk_prepare_lock();

	for (; *lists; lists++) {
		hlist_for_each_entry(c, *lists, child_node) {
			if (!first_node)
				seq_puts(s, ",");
			first_node = false;
			clk_dump_subtree(s, c, 0);
		}
	}

	clk_prepare_unlock();

	seq_printf(s, "}");
	return 0;
}


static int clk_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dump, inode->i_private);
}

static const struct file_operations clk_dump_fops = {
	.open		= clk_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
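
/*
 * Example: clk_dump emits the same tree as a single JSON object; the
 * values below are illustrative:
 *
 *	# cat /sys/kernel/debug/clk/clk_dump
 *	{"osc": { "enable_count": 1,"prepare_count": 1,"rate": 24000000,
 *	"accuracy": 0,"phase": 0,"pll": { ... }}}
 */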

/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
	struct dentry *d;
	int ret = -ENOMEM;

	if (!clk || !pdentry) {
		ret = -EINVAL;
		goto out;
	}

	d = debugfs_create_dir(clk->name, pdentry);
	if (!d)
		goto out;

	clk->dentry = d;

	d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
			(u32 *)&clk->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_accuracy", S_IRUGO, clk->dentry,
			(u32 *)&clk->accuracy);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_phase", S_IRUGO, clk->dentry,
			(u32 *)&clk->phase);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
			(u32 *)&clk->flags);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->prepare_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->enable_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->notifier_count);
	if (!d)
		goto err_out;

	if (clk->ops->debug_init) {
		ret = clk->ops->debug_init(clk->hw, clk->dentry);
		if (ret)
			goto err_out;
	}

	ret = 0;
	goto out;

err_out:
	debugfs_remove_recursive(clk->dentry);
	clk->dentry = NULL;
out:
	return ret;
}

/* caller must hold prepare_lock */
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
	struct clk *child;
	int ret = -EINVAL;

	if (!clk || !pdentry)
		goto out;

	ret = clk_debug_create_one(clk, pdentry);

	if (ret)
		goto out;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_debug_create_subtree(child, pdentry);

	ret = 0;
out:
	return ret;
}

/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.  Only clk_init calls this function (so
 * far) so this is taken care of.
 */
static int clk_debug_register(struct clk *clk)
{
	int ret = 0;

	if (!inited)
		goto out;

	ret = clk_debug_create_subtree(clk, rootdir);

out:
	return ret;
}

/**
 * clk_debug_unregister - remove a clk node from the debugfs clk tree
 * @clk: the clk being removed from the debugfs clk tree
 *
 * Dynamically removes a clk and all its child clk nodes from the
 * debugfs clk tree if clk->dentry points to debugfs created by
 * clk_debug_register in __clk_init.
 *
 * Caller must hold prepare_lock.
 */
static void clk_debug_unregister(struct clk *clk)
{
	debugfs_remove_recursive(clk->dentry);
}

struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode,
				void *data, const struct file_operations *fops)
{
	struct dentry *d = NULL;

	if (clk->dentry)
		d = debugfs_create_file(name, mode, clk->dentry, data, fops);

	return d;
}
EXPORT_SYMBOL_GPL(clk_debugfs_add_file);

/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is set up.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus ensuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time, and all other clks added dynamically will
 * be done so with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk *clk;
	struct dentry *d;

	rootdir = debugfs_create_dir("clk", NULL);

	if (!rootdir)
		return -ENOMEM;

	d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
				&clk_summary_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
				&clk_dump_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
				&orphan_list, &clk_summary_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
				&orphan_list, &clk_dump_fops);
	if (!d)
		return -ENOMEM;

	clk_prepare_lock();

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_debug_create_subtree(clk, rootdir);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_debug_create_subtree(clk, rootdir);

	inited = 1;

	clk_prepare_unlock();

	return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
{
}
static inline void clk_debug_unregister(struct clk *clk)
{
}
#endif

/* caller must hold prepare_lock */
static void clk_unprepare_unused_subtree(struct clk *clk)
{
	struct clk *child;

	if (!clk)
		return;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (clk->prepare_count)
		return;

	if (clk->flags & CLK_IGNORE_UNUSED)
		return;

	if (__clk_is_prepared(clk)) {
		if (clk->ops->unprepare_unused)
			clk->ops->unprepare_unused(clk->hw);
		else if (clk->ops->unprepare)
			clk->ops->unprepare(clk->hw);
	}
}

/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
	struct clk *child;
	unsigned long flags;

	if (!clk)
		goto out;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_disable_unused_subtree(child);

	flags = clk_enable_lock();

	if (clk->enable_count)
		goto unlock_out;

	if (clk->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence.  call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (__clk_is_enabled(clk)) {
		if (clk->ops->disable_unused)
			clk->ops->disable_unused(clk->hw);
		else if (clk->ops->disable)
			clk->ops->disable(clk->hw);
	}

unlock_out:
	clk_enable_unlock(flags);

out:
	return;
}

static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);
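
/*
 * Example: booting with "clk_ignore_unused" on the kernel command line
 * sets the flag above and makes clk_disable_unused() below bail out,
 * which can help when bringing up a platform whose consumers do not
 * yet claim all of their clocks.
 */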

static int clk_disable_unused(void)
{
	struct clk *clk;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(clk);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

/***    helper functions   ***/

const char *__clk_get_name(struct clk *clk)
{
	return !clk ? NULL : clk->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

u8 __clk_get_num_parents(struct clk *clk)
{
	return !clk ? 0 : clk->num_parents;
}
EXPORT_SYMBOL_GPL(__clk_get_num_parents);

struct clk *__clk_get_parent(struct clk *clk)
{
	return !clk ? NULL : clk->parent;
}
EXPORT_SYMBOL_GPL(__clk_get_parent);

struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
{
	if (!clk || index >= clk->num_parents)
		return NULL;
	else if (!clk->parents)
		return __clk_lookup(clk->parent_names[index]);
	else if (!clk->parents[index])
		return clk->parents[index] =
			__clk_lookup(clk->parent_names[index]);
	else
		return clk->parents[index];
}
EXPORT_SYMBOL_GPL(clk_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->enable_count;
}

unsigned int __clk_get_prepare_count(struct clk *clk)
{
	return !clk ? 0 : clk->prepare_count;
}

unsigned long __clk_get_rate(struct clk *clk)
{
	unsigned long ret;

	if (!clk) {
		ret = 0;
		goto out;
	}

	ret = clk->rate;

	if (clk->flags & CLK_IS_ROOT)
		goto out;

	if (!clk->parent)
		ret = 0;

out:
	return ret;
}
EXPORT_SYMBOL_GPL(__clk_get_rate);

unsigned long __clk_get_accuracy(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk->accuracy;
}

unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

bool __clk_is_prepared(struct clk *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_prepared is optional for clocks that can prepare;
	 * fall back to the software usage counter if it is missing
	 */
	if (!clk->ops->is_prepared) {
		ret = clk->prepare_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_prepared(clk->hw);
out:
	return !!ret;
}

bool __clk_is_enabled(struct clk *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to the software usage counter if .is_enabled is missing
	 */
	if (!clk->ops->is_enabled) {
		ret = clk->enable_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_enabled(clk->hw);
out:
	return !!ret;
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
	struct clk *child;
	struct clk *ret;

	if (!strcmp(clk->name, name))
		return clk;

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

struct clk *__clk_lookup(const char *name)
{
	struct clk *root_clk;
	struct clk *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

/*
 * Helper for finding best parent to provide a given frequency.  This can be
 * used directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 */
long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *best_parent_rate,
			      struct clk **best_parent_p)
{
	struct clk *clk = hw->clk, *parent, *best_parent = NULL;
	int i, num_parents;
	unsigned long parent_rate, best = 0;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = clk->parent;
		if (clk->flags & CLK_SET_RATE_PARENT)
			best = __clk_round_rate(parent, rate);
		else if (parent)
			best = __clk_get_rate(parent);
		else
			best = __clk_get_rate(clk);
		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = clk->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_get_parent_by_index(clk, i);
		if (!parent)
			continue;
		if (clk->flags & CLK_SET_RATE_PARENT)
			parent_rate = __clk_round_rate(parent, rate);
		else
			parent_rate = __clk_get_rate(parent);
		if (parent_rate <= rate && parent_rate > best) {
			best_parent = parent;
			best = parent_rate;
		}
	}

out:
	if (best_parent)
		*best_parent_p = best_parent;
	*best_parent_rate = best;

	return best;
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
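
/*
 * Example: a mux-style provider can plug the helper above straight
 * into its clk_ops as the .determine_rate callback; the driver names
 * below are illustrative:
 *
 *	static const struct clk_ops foo_mux_ops = {
 *		.get_parent	= foo_mux_get_parent,
 *		.set_parent	= foo_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 */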

/***        clk api        ***/

void __clk_unprepare(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->prepare_count == 0))
		return;

	if (--clk->prepare_count > 0)
		return;

	WARN_ON(clk->enable_count > 0);

	if (clk->ops->unprepare)
		clk->ops->unprepare(clk->hw);

	__clk_unprepare(clk->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a
 * slow part.  It is for this reason that clk_unprepare and clk_disable are
 * not mutually exclusive.  In fact clk_disable must be called before
 * clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_prepare_lock();
	__clk_unprepare(clk);
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);
814int __clk_prepare(struct clk *clk)
815{
816 int ret = 0;
817
818 if (!clk)
819 return 0;
820
821 if (clk->prepare_count == 0) {
822 ret = __clk_prepare(clk->parent);
823 if (ret)
824 return ret;
825
826 if (clk->ops->prepare) {
827 ret = clk->ops->prepare(clk->hw);
828 if (ret) {
829 __clk_unprepare(clk->parent);
830 return ret;
831 }
832 }
833 }
834
835 clk->prepare_count++;
836
837 return 0;
838}
839
840/**
841 * clk_prepare - prepare a clock source
842 * @clk: the clk being prepared
843 *
844 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
845 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
846 * operation may sleep. One example is a clk which is accessed over I2c. In
847 * the complex case a clk ungate operation may require a fast and a slow part.
848 * It is this reason that clk_prepare and clk_enable are not mutually
849 * exclusive. In fact clk_prepare must be called before clk_enable.
850 * Returns 0 on success, -EERROR otherwise.
851 */
852int clk_prepare(struct clk *clk)
853{
854 int ret;
855
Mike Turquetteeab89f62013-03-28 13:59:01 -0700856 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -0700857 ret = __clk_prepare(clk);
Mike Turquetteeab89f62013-03-28 13:59:01 -0700858 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -0700859
860 return ret;
861}
862EXPORT_SYMBOL_GPL(clk_prepare);
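
/*
 * Example (illustrative consumer code): prepare in sleepable context,
 * then enable where atomicity is required:
 *
 *	ret = clk_prepare(clk);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_enable(clk);
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 */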

static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->enable_count == 0))
		return;

	if (--clk->enable_count > 0)
		return;

	if (clk->ops->disable)
		clk->ops->disable(clk->hw);

	__clk_disable(clk->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It
 * is for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (IS_ERR_OR_NULL(clk))
		return;

	flags = clk_enable_lock();
	__clk_disable(clk);
	clk_enable_unlock(flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
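
/*
 * Example (illustrative consumer code): teardown mirrors bring-up in
 * reverse, with clk_disable() called before clk_unprepare():
 *
 *	clk_disable(clk);
 *	clk_unprepare(clk);
 */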

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (WARN_ON(clk->prepare_count == 0))
		return -ESHUTDOWN;

	if (clk->enable_count == 0) {
		ret = __clk_enable(clk->parent);

		if (ret)
			return ret;

		if (clk->ops->enable) {
			ret = clk->ops->enable(clk->hw);
			if (ret) {
				__clk_disable(clk->parent);
				return ret;
			}
		}
	}

	clk->enable_count++;
	return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = __clk_enable(clk);
	clk_enable_unlock(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long parent_rate = 0;
	struct clk *parent;

	if (!clk)
		return 0;

	parent = clk->parent;
	if (parent)
		parent_rate = parent->rate;

	if (clk->ops->determine_rate)
		return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
						&parent);
	else if (clk->ops->round_rate)
		return clk->ops->round_rate(clk->hw, rate, &parent_rate);
	else if (clk->flags & CLK_SET_RATE_PARENT)
		return __clk_round_rate(clk->parent, rate);
	else
		return clk->rate;
}
EXPORT_SYMBOL_GPL(__clk_round_rate);
992/**
993 * clk_round_rate - round the given rate for a clk
994 * @clk: the clk for which we are rounding a rate
995 * @rate: the rate which is to be rounded
996 *
997 * Takes in a rate as input and rounds it to a rate that the clk can actually
998 * use which is then returned. If clk doesn't support round_rate operation
999 * then the parent rate is returned.
1000 */
1001long clk_round_rate(struct clk *clk, unsigned long rate)
1002{
1003 unsigned long ret;
1004
Mike Turquetteeab89f62013-03-28 13:59:01 -07001005 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001006 ret = __clk_round_rate(clk, rate);
Mike Turquetteeab89f62013-03-28 13:59:01 -07001007 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001008
1009 return ret;
1010}
1011EXPORT_SYMBOL_GPL(clk_round_rate);
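
/*
 * Example (illustrative consumer code): round first, then set, so the
 * requested rate is one the clk can actually produce:
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *
 *	if (rounded > 0)
 *		ret = clk_set_rate(clk, rounded);
 */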

/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk *clk, unsigned long msg,
		unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.clk = clk;
	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk == clk) {
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
			break;
		}
	}

	return ret;
}
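
/*
 * Example (illustrative consumer code): drivers receive these messages
 * by subscribing with clk_notifier_register():
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (event == PRE_RATE_CHANGE)
 *			pr_debug("rate %lu -> %lu\n", cnd->old_rate,
 *				 cnd->new_rate);
 *		return NOTIFY_OK;
 *	}
 */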

/**
 * __clk_recalc_accuracies
 * @clk: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes.  Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_accuracies(struct clk *clk)
{
	unsigned long parent_accuracy = 0;
	struct clk *child;

	if (clk->parent)
		parent_accuracy = clk->parent->accuracy;

	if (clk->ops->recalc_accuracy)
		clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
							  parent_accuracy);
	else
		clk->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &clk->children, child_node)
		__clk_recalc_accuracies(child);
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will be
 * issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	unsigned long accuracy;

	clk_prepare_lock();
	if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(clk);

	accuracy = __clk_get_accuracy(clk);
	clk_prepare_unlock();

	return accuracy;
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

static unsigned long clk_recalc(struct clk *clk, unsigned long parent_rate)
{
	if (clk->ops->recalc_rate)
		return clk->ops->recalc_rate(clk->hw, parent_rate);
	return parent_rate;
}

/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk *child;

	old_rate = clk->rate;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	clk->rate = clk_recalc(clk, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (clk->notifier_count && msg)
		__clk_notify(clk, msg, old_rate, clk->rate);

	hlist_for_each_entry(child, &clk->children, child_node)
		__clk_recalc_rates(child, msg);
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	clk_prepare_lock();

	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(clk, 0);

	rate = __clk_get_rate(clk);
	clk_prepare_unlock();

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
{
	int i;

	if (!clk->parents) {
		clk->parents = kcalloc(clk->num_parents,
					sizeof(struct clk *), GFP_KERNEL);
		if (!clk->parents)
			return -ENOMEM;
	}

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to __clk_lookup.
	 */
	for (i = 0; i < clk->num_parents; i++) {
		if (clk->parents[i] == parent)
			return i;

		if (clk->parents[i])
			continue;

		if (!strcmp(clk->parent_names[i], parent->name)) {
			clk->parents[i] = __clk_lookup(parent->name);
			return i;
		}
	}

	return -EINVAL;
}

static void clk_reparent(struct clk *clk, struct clk *new_parent)
{
	hlist_del(&clk->child_node);

	if (new_parent) {
		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == clk)
			new_parent->new_child = NULL;

		hlist_add_head(&clk->child_node, &new_parent->children);
	} else {
		hlist_add_head(&clk->child_node, &clk_orphan_list);
	}

	clk->parent = new_parent;
}

static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	struct clk *old_parent = clk->parent;

	/*
	 * Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on.  This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */
	if (clk->prepare_count) {
		__clk_prepare(parent);
		clk_enable(parent);
		clk_enable(clk);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(clk, parent);
	clk_enable_unlock(flags);

	return old_parent;
}

static void __clk_set_parent_after(struct clk *clk, struct clk *parent,
		struct clk *old_parent)
{
	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (clk->prepare_count) {
		clk_disable(clk);
		clk_disable(old_parent);
		__clk_unprepare(old_parent);
	}
}

static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk *old_parent;

	old_parent = __clk_set_parent_before(clk, parent);

	/* change clock input source */
	if (parent && clk->ops->set_parent)
		ret = clk->ops->set_parent(clk->hw, p_index);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(clk, old_parent);
		clk_enable_unlock(flags);

		if (clk->prepare_count) {
			clk_disable(clk);
			clk_disable(parent);
			__clk_unprepare(parent);
		}
		return ret;
	}

	__clk_set_parent_after(clk, parent, old_parent);

	return 0;
}

/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
	struct clk *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	new_rate = clk_recalc(clk, parent_rate);

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (clk->notifier_count)
		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
				__func__, clk->name, ret);
		goto out;
	}

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
			     struct clk *new_parent, u8 p_index)
{
	struct clk *child;

	clk->new_rate = new_rate;
	clk->new_parent = new_parent;
	clk->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	clk->new_child = NULL;
	if (new_parent && new_parent != clk->parent)
		new_parent->new_child = clk;

	hlist_for_each_entry(child, &clk->children, child_node) {
		child->new_rate = clk_recalc(child, new_rate);
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
	struct clk *top = clk;
	struct clk *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	int p_index = 0;

	/* sanity */
	if (IS_ERR_OR_NULL(clk))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = clk->parent;
	if (parent)
		best_parent_rate = parent->rate;

	/* find the closest rate and parent clk/rate */
	if (clk->ops->determine_rate) {
		new_rate = clk->ops->determine_rate(clk->hw, rate,
						    &best_parent_rate,
						    &parent);
	} else if (clk->ops->round_rate) {
		new_rate = clk->ops->round_rate(clk->hw, rate,
						&best_parent_rate);
	} else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		clk->new_rate = clk->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, clk->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(clk, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, clk->name);
			return NULL;
		}
	}

	if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(clk, new_rate, parent, p_index);

	return top;
}

/*
 * Notify about rate changes in a subtree.  Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
	struct clk *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (clk->rate == clk->new_rate)
		return NULL;

	if (clk->notifier_count) {
		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = clk;
	}

	hlist_for_each_entry(child, &clk->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != clk)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in clk->children yet */
	if (clk->new_child) {
		tmp_clk = clk_propagate_rate_change(clk->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
	struct clk *child;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk *old_parent;

	old_rate = clk->rate;

	if (clk->new_parent)
		best_parent_rate = clk->new_parent->rate;
	else if (clk->parent)
		best_parent_rate = clk->parent->rate;

	if (clk->new_parent && clk->new_parent != clk->parent) {
		old_parent = __clk_set_parent_before(clk, clk->new_parent);

		if (clk->ops->set_rate_and_parent) {
			skip_set_rate = true;
			clk->ops->set_rate_and_parent(clk->hw, clk->new_rate,
					best_parent_rate,
					clk->new_parent_index);
		} else if (clk->ops->set_parent) {
			clk->ops->set_parent(clk->hw, clk->new_parent_index);
		}

		__clk_set_parent_after(clk, clk->new_parent, old_parent);
	}

	if (!skip_set_rate && clk->ops->set_rate)
		clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);

	clk->rate = clk_recalc(clk, best_parent_rate);

	if (clk->notifier_count && old_rate != clk->rate)
		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

	hlist_for_each_entry(child, &clk->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != clk)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in clk->children yet */
	if (clk->new_child)
		clk_change_rate(clk->new_child);
}
1522
1523/**
1524 * clk_set_rate - specify a new rate for clk
1525 * @clk: the clk whose rate is being changed
1526 * @rate: the new rate for clk
1527 *
Mike Turquette5654dc92012-03-26 11:51:34 -07001528 * In the simplest case clk_set_rate will only adjust the rate of clk.
Mike Turquetteb24764902012-03-15 23:11:19 -07001529 *
Mike Turquette5654dc92012-03-26 11:51:34 -07001530 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
1531 * propagate up to clk's parent; whether or not this happens depends on the
1532 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
1533 * after calling .round_rate then upstream parent propagation is ignored. If
1534 * *parent_rate comes back with a new rate for clk's parent then we propagate
Peter Meerwald24ee1a02013-06-29 15:14:19 +02001535 * up to clk's parent and set its rate. Upward propagation will continue
Mike Turquette5654dc92012-03-26 11:51:34 -07001536 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
1537 * .round_rate stops requesting changes to clk's parent_rate.
Mike Turquetteb24764902012-03-15 23:11:19 -07001538 *
Mike Turquette5654dc92012-03-26 11:51:34 -07001539 * Rate changes are accomplished via tree traversal that also recalculates the
1540 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
Mike Turquetteb24764902012-03-15 23:11:19 -07001541 *
1542 * Returns 0 on success, or a negative error code otherwise.
1543 */
1544int clk_set_rate(struct clk *clk, unsigned long rate)
1545{
1546 struct clk *top, *fail_clk;
1547 int ret = 0;
1548
Mike Turquette89ac8d72013-08-21 23:58:09 -07001549 if (!clk)
1550 return 0;
1551
Mike Turquetteb24764902012-03-15 23:11:19 -07001552 /* prevent racing with updates to the clock topology */
Mike Turquetteeab89f62013-03-28 13:59:01 -07001553 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001554
1555 /* bail early if nothing to do */
Peter De Schrijver34e452a2013-06-05 18:06:36 +03001556 if (rate == clk_get_rate(clk))
Mike Turquetteb24764902012-03-15 23:11:19 -07001557 goto out;
1558
Saravana Kannan7e0fa1b2012-05-15 13:43:42 -07001559 if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
Viresh Kumar0e1c0302012-04-11 16:03:42 +05301560 ret = -EBUSY;
1561 goto out;
1562 }
1563
Mike Turquetteb24764902012-03-15 23:11:19 -07001564 /* calculate new rates and get the topmost changed clock */
1565 top = clk_calc_new_rates(clk, rate);
1566 if (!top) {
1567 ret = -EINVAL;
1568 goto out;
1569 }
1570
1571 /* notify that we are about to change rates */
1572 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
1573 if (fail_clk) {
Sascha Hauerf7363862014-01-16 16:12:55 +01001574 pr_debug("%s: failed to set %s rate\n", __func__,
Mike Turquetteb24764902012-03-15 23:11:19 -07001575 fail_clk->name);
1576 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
1577 ret = -EBUSY;
1578 goto out;
1579 }
1580
1581 /* change the rates */
1582 clk_change_rate(top);
1583
Mike Turquetteb24764902012-03-15 23:11:19 -07001584out:
Mike Turquetteeab89f62013-03-28 13:59:01 -07001585 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001586
1587 return ret;
1588}
1589EXPORT_SYMBOL_GPL(clk_set_rate);
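
/*
 * Editor's illustrative sketch, not part of this file: a typical consumer
 * of clk_set_rate(). The "bus" con_id and the 100 MHz target are
 * hypothetical; assumes <linux/clk.h> and <linux/device.h>. On a clock
 * with CLK_SET_RATE_GATE the rate must be set while the clock is still
 * unprepared, which is why the set happens before clk_prepare_enable().
 */
static int example_set_bus_rate(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "bus");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* the clock's .round_rate may settle on a nearby supported rate */
	ret = clk_set_rate(clk, 100000000);
	if (ret)
		return ret;

	dev_dbg(dev, "bus clock runs at %lu Hz\n", clk_get_rate(clk));

	return clk_prepare_enable(clk);
}
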
1590
1591/**
1592 * clk_get_parent - return the parent of a clk
1593 * @clk: the clk whose parent gets returned
1594 *
1595 * Simply returns clk->parent. Returns NULL if clk is NULL.
1596 */
1597struct clk *clk_get_parent(struct clk *clk)
1598{
1599 struct clk *parent;
1600
Mike Turquetteeab89f62013-03-28 13:59:01 -07001601 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001602 parent = __clk_get_parent(clk);
Mike Turquetteeab89f62013-03-28 13:59:01 -07001603 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001604
1605 return parent;
1606}
1607EXPORT_SYMBOL_GPL(clk_get_parent);
1608
1609/*
1610 * .get_parent is mandatory for clocks with multiple possible parents. It is
1611 * optional for single-parent clocks. Always call .get_parent if it is
1612 * available and WARN if it is missing for multi-parent clocks.
1613 *
1614 * For single-parent clocks without .get_parent, first check to see if the
1615 * .parents array exists, and if so use it to avoid an expensive tree
1616 * traversal. If .parents does not exist then walk the tree with __clk_lookup.
1617 */
1618static struct clk *__clk_init_parent(struct clk *clk)
1619{
1620 struct clk *ret = NULL;
1621 u8 index;
1622
1623 /* handle the trivial cases */
1624
1625 if (!clk->num_parents)
1626 goto out;
1627
1628 if (clk->num_parents == 1) {
1629 if (IS_ERR_OR_NULL(clk->parent))
1630 clk->parent = __clk_lookup(clk->parent_names[0]);
1631 ret = clk->parent;
1632 goto out;
1633 }
1634
1635 if (!clk->ops->get_parent) {
1636 WARN(!clk->ops->get_parent,
1637 "%s: multi-parent clocks must implement .get_parent\n",
1638 __func__);
1639 goto out;
1640 }
1641
1642 /*
1643 * Do our best to cache parent clocks in clk->parents. This prevents
1644 * unnecessary and expensive calls to __clk_lookup. We don't set
1645 * clk->parent here; that is done by the calling function
1646 */
1647
1648 index = clk->ops->get_parent(clk->hw);
1649
1650 if (!clk->parents)
1651 clk->parents =
Tomasz Figa96a7ed92013-09-29 02:37:15 +02001652 kcalloc(clk->num_parents, sizeof(struct clk *),
Mike Turquetteb24764902012-03-15 23:11:19 -07001653 GFP_KERNEL);
1654
James Hogan7ef3dcc2013-07-29 12:24:58 +01001655 ret = clk_get_parent_by_index(clk, index);
Mike Turquetteb24764902012-03-15 23:11:19 -07001656
1657out:
1658 return ret;
1659}
1660
Ulf Hanssonb33d2122013-04-02 23:09:37 +02001661void __clk_reparent(struct clk *clk, struct clk *new_parent)
1662{
1663 clk_reparent(clk, new_parent);
Boris BREZILLON5279fc42013-12-21 10:34:47 +01001664 __clk_recalc_accuracies(clk);
Mike Turquetteb24764902012-03-15 23:11:19 -07001665 __clk_recalc_rates(clk, POST_RATE_CHANGE);
1666}
1667
Mike Turquetteb24764902012-03-15 23:11:19 -07001668/**
1669 * clk_set_parent - switch the parent of a mux clk
1670 * @clk: the mux clk whose input we are switching
1671 * @parent: the new input to clk
1672 *
Saravana Kannanf8aa0bd2013-05-15 21:07:24 -07001673 * Re-parent clk to use parent as its new input source. If clk is in
1674 * prepared state, the clk will get enabled for the duration of this call. If
1675 * that's not acceptable for a specific clk (e.g. the consumer can't handle
1676 * that, the reparenting is glitchy in hardware, etc), use the
1677 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
1678 *
1679 * After successfully changing clk's parent clk_set_parent will update the
1680 * clk topology, sysfs topology and propagate rate recalculation via
1681 * __clk_recalc_rates.
1682 *
1683 * Returns 0 on success, or a negative error code otherwise.
Mike Turquetteb24764902012-03-15 23:11:19 -07001684 */
1685int clk_set_parent(struct clk *clk, struct clk *parent)
1686{
1687 int ret = 0;
Tomasz Figaf1c8b2e2013-09-29 02:37:14 +02001688 int p_index = 0;
Ulf Hansson031dcc92013-04-02 23:09:38 +02001689 unsigned long p_rate = 0;
Mike Turquetteb24764902012-03-15 23:11:19 -07001690
Mike Turquette89ac8d72013-08-21 23:58:09 -07001691 if (!clk)
1692 return 0;
1693
Ulf Hansson031dcc92013-04-02 23:09:38 +02001694 /* verify ops for multi-parent clks */
1695 if ((clk->num_parents > 1) && (!clk->ops->set_parent))
Mike Turquetteb24764902012-03-15 23:11:19 -07001696 return -ENOSYS;
1697
1698 /* prevent racing with updates to the clock topology */
Mike Turquetteeab89f62013-03-28 13:59:01 -07001699 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001700
1701 if (clk->parent == parent)
1702 goto out;
1703
Ulf Hansson031dcc92013-04-02 23:09:38 +02001704 /* check that we are allowed to re-parent if the clock is in use */
1705 if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
1706 ret = -EBUSY;
1707 goto out;
1708 }
1709
1710 /* try finding the new parent index */
1711 if (parent) {
1712 p_index = clk_fetch_parent_index(clk, parent);
1713 p_rate = parent->rate;
Tomasz Figaf1c8b2e2013-09-29 02:37:14 +02001714 if (p_index < 0) {
Ulf Hansson031dcc92013-04-02 23:09:38 +02001715 pr_debug("%s: clk %s can not be parent of clk %s\n",
1716 __func__, parent->name, clk->name);
Tomasz Figaf1c8b2e2013-09-29 02:37:14 +02001717 ret = p_index;
Ulf Hansson031dcc92013-04-02 23:09:38 +02001718 goto out;
1719 }
1720 }
1721
Mike Turquetteb24764902012-03-15 23:11:19 -07001722 /* propagate PRE_RATE_CHANGE notifications */
Soren Brinkmannf3aab5d2013-04-16 10:06:50 -07001723 ret = __clk_speculate_rates(clk, p_rate);
Mike Turquetteb24764902012-03-15 23:11:19 -07001724
1725 /* abort if a driver objects */
Soren Brinkmannfb72a052013-04-03 12:17:12 -07001726 if (ret & NOTIFY_STOP_MASK)
Mike Turquetteb24764902012-03-15 23:11:19 -07001727 goto out;
1728
Ulf Hansson031dcc92013-04-02 23:09:38 +02001729 /* do the re-parent */
1730 ret = __clk_set_parent(clk, parent, p_index);
Mike Turquetteb24764902012-03-15 23:11:19 -07001731
Boris BREZILLON5279fc42013-12-21 10:34:47 +01001732 /* propagate rate and accuracy recalculation accordingly */
1733 if (ret) {
Mike Turquetteb24764902012-03-15 23:11:19 -07001734 __clk_recalc_rates(clk, ABORT_RATE_CHANGE);
Boris BREZILLON5279fc42013-12-21 10:34:47 +01001735 } else {
Ulf Hanssona68de8e2013-04-02 23:09:39 +02001736 __clk_recalc_rates(clk, POST_RATE_CHANGE);
Boris BREZILLON5279fc42013-12-21 10:34:47 +01001737 __clk_recalc_accuracies(clk);
1738 }
Mike Turquetteb24764902012-03-15 23:11:19 -07001739
1740out:
Mike Turquetteeab89f62013-03-28 13:59:01 -07001741 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001742
1743 return ret;
1744}
1745EXPORT_SYMBOL_GPL(clk_set_parent);
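
/*
 * Editor's illustrative sketch, not part of this file: re-parenting a mux
 * clock to a slower source, e.g. before entering a low-power state. The
 * "osc32k" clkdev name is hypothetical; assumes <linux/clk.h>.
 */
static int example_switch_to_slow_parent(struct clk *mux)
{
	struct clk *slow;
	int ret;

	slow = clk_get(NULL, "osc32k");
	if (IS_ERR(slow))
		return PTR_ERR(slow);

	/*
	 * Fails with -EBUSY when the mux carries CLK_SET_PARENT_GATE and
	 * is currently prepared, per the check in clk_set_parent() above.
	 */
	ret = clk_set_parent(mux, slow);

	clk_put(slow);
	return ret;
}
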
1746
1747/**
Mike Turquettee59c5372014-02-18 21:21:25 -08001748 * clk_set_phase - adjust the phase shift of a clock signal
1749 * @clk: clock signal source
1750 * @degrees: number of degrees the signal is shifted
1751 *
1752 * Shifts the phase of a clock signal by the specified
1753 * degrees. Returns 0 on success, or a negative error code otherwise.
1754 *
1755 * This function makes no distinction about the input or reference
1756 * signal that we adjust the clock signal phase against. For example, with
1757 * phase-locked-loop clock signal generators we may shift phase with
1758 * respect to the feedback clock signal input, but in other cases the
1759 * clock phase may be shifted with respect to some other, unspecified
1760 * signal.
1761 *
1762 * Additionally the concept of phase shift does not propagate through
1763 * the clock tree hierarchy, which sets it apart from clock rates and
1764 * clock accuracy. A parent clock phase attribute does not have an
1765 * impact on the phase attribute of a child clock.
1766 */
1767int clk_set_phase(struct clk *clk, int degrees)
1768{
1769 int ret = 0;
1770
1771 if (!clk)
1772 goto out;
1773
1774 /* sanity check degrees */
1775 degrees %= 360;
1776 if (degrees < 0)
1777 degrees += 360;
1778
1779 clk_prepare_lock();
1780
1781 if (!clk->ops->set_phase)
1782 goto out_unlock;
1783
1784 ret = clk->ops->set_phase(clk->hw, degrees);
1785
1786 if (!ret)
1787 clk->phase = degrees;
1788
1789out_unlock:
1790 clk_prepare_unlock();
1791
1792out:
1793 return ret;
1794}
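
/*
 * Editor's illustrative sketch, not part of this file: shifting a sampling
 * clock by a quarter period, as e.g. an MMC controller might do when
 * tuning. The clock handle is assumed to come from clk_get(). Note that
 * clk->phase is only cached when the .set_phase op exists and succeeds,
 * so the read-back below also catches hardware without phase support.
 */
static int example_quarter_phase(struct clk *sample_clk)
{
	int ret;

	ret = clk_set_phase(sample_clk, 90);
	if (ret)
		return ret;

	if (clk_get_phase(sample_clk) != 90)
		return -EIO;

	return 0;
}
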
1795
1796/**
1797 * clk_get_phase - return the phase shift of a clock signal
1798 * @clk: clock signal source
1799 *
1800 * Returns the phase shift of a clock node in degrees, or a negative
1801 * error code on failure.
1802 */
1803int clk_get_phase(struct clk *clk)
1804{
1805 int ret = 0;
1806
1807 if (!clk)
1808 goto out;
1809
1810 clk_prepare_lock();
1811 ret = clk->phase;
1812 clk_prepare_unlock();
1813
1814out:
1815 return ret;
1816}
1817
1818/**
Mike Turquetteb24764902012-03-15 23:11:19 -07001819 * __clk_init - initialize the data structures in a struct clk
1820 * @dev: device initializing this clk, placeholder for now
1821 * @clk: clk being initialized
1822 *
1823 * Initializes the lists in struct clk, queries the hardware for the
1824 * parent and rate and sets them both.
Mike Turquetteb24764902012-03-15 23:11:19 -07001825 */
Mike Turquetted1302a32012-03-29 14:30:40 -07001826int __clk_init(struct device *dev, struct clk *clk)
Mike Turquetteb24764902012-03-15 23:11:19 -07001827{
Mike Turquetted1302a32012-03-29 14:30:40 -07001828 int i, ret = 0;
Mike Turquetteb24764902012-03-15 23:11:19 -07001829 struct clk *orphan;
Sasha Levinb67bfe02013-02-27 17:06:00 -08001830 struct hlist_node *tmp2;
Mike Turquetteb24764902012-03-15 23:11:19 -07001831
1832 if (!clk)
Mike Turquetted1302a32012-03-29 14:30:40 -07001833 return -EINVAL;
Mike Turquetteb24764902012-03-15 23:11:19 -07001834
Mike Turquetteeab89f62013-03-28 13:59:01 -07001835 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001836
1837 /* check to see if a clock with this name is already registered */
Mike Turquetted1302a32012-03-29 14:30:40 -07001838 if (__clk_lookup(clk->name)) {
1839 pr_debug("%s: clk %s already initialized\n",
1840 __func__, clk->name);
1841 ret = -EEXIST;
Mike Turquetteb24764902012-03-15 23:11:19 -07001842 goto out;
Mike Turquetted1302a32012-03-29 14:30:40 -07001843 }
Mike Turquetteb24764902012-03-15 23:11:19 -07001844
Mike Turquetted4d7e3d2012-03-26 16:15:52 -07001845 /* check that clk_ops are sane. See Documentation/clk.txt */
1846 if (clk->ops->set_rate &&
James Hogan71472c02013-07-29 12:25:00 +01001847 !((clk->ops->round_rate || clk->ops->determine_rate) &&
1848 clk->ops->recalc_rate)) {
1849 pr_warn("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
Mike Turquetted4d7e3d2012-03-26 16:15:52 -07001850 __func__, clk->name);
Mike Turquetted1302a32012-03-29 14:30:40 -07001851 ret = -EINVAL;
Mike Turquetted4d7e3d2012-03-26 16:15:52 -07001852 goto out;
1853 }
1854
1855 if (clk->ops->set_parent && !clk->ops->get_parent) {
1856 pr_warn("%s: %s must implement .get_parent & .set_parent\n",
1857 __func__, clk->name);
Mike Turquetted1302a32012-03-29 14:30:40 -07001858 ret = -EINVAL;
Mike Turquetted4d7e3d2012-03-26 16:15:52 -07001859 goto out;
1860 }
1861
Stephen Boyd3fa22522014-01-15 10:47:22 -08001862 if (clk->ops->set_rate_and_parent &&
1863 !(clk->ops->set_parent && clk->ops->set_rate)) {
1864 pr_warn("%s: %s must implement .set_parent & .set_rate\n",
1865 __func__, clk->name);
1866 ret = -EINVAL;
1867 goto out;
1868 }
1869
Mike Turquetteb24764902012-03-15 23:11:19 -07001870 /* throw a WARN if any entries in parent_names are NULL */
1871 for (i = 0; i < clk->num_parents; i++)
1872 WARN(!clk->parent_names[i],
1873 "%s: invalid NULL in %s's .parent_names\n",
1874 __func__, clk->name);
1875
1876 /*
1877 * Allocate an array of struct clk *'s to avoid unnecessary string
1878 * look-ups of clk's possible parents. This can fail for clocks passed
1879 * in to clk_init during early boot; thus any access to clk->parents[]
1880 * must always check for a NULL pointer and try to populate it if
1881 * necessary.
1882 *
1883 * If clk->parents is not NULL we skip this entire block. This allows
1884 * for clock drivers to statically initialize clk->parents.
1885 */
Rajendra Nayak9ca1c5a2012-06-06 14:41:30 +05301886 if (clk->num_parents > 1 && !clk->parents) {
Tomasz Figa96a7ed92013-09-29 02:37:15 +02001887 clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
1888 GFP_KERNEL);
Mike Turquetteb24764902012-03-15 23:11:19 -07001889 /*
1890 * __clk_lookup returns NULL for parents that have not been
1891 * clk_init'd; thus any access to clk->parents[] must check
1892 * for a NULL pointer. We can always perform lazy lookups for
1893 * missing parents later on.
1894 */
1895 if (clk->parents)
1896 for (i = 0; i < clk->num_parents; i++)
1897 clk->parents[i] =
1898 __clk_lookup(clk->parent_names[i]);
1899 }
1900
1901 clk->parent = __clk_init_parent(clk);
1902
1903 /*
1904 * Populate clk->parent if parent has already been __clk_init'd. If
1905 * parent has not yet been __clk_init'd then place clk in the orphan
1906 * list. If clk has set the CLK_IS_ROOT flag then place it in the root
1907 * clk list.
1908 *
1909 * Every time a new clk is clk_init'd then we walk the list of orphan
1910 * clocks and re-parent any that are children of the clock currently
1911 * being clk_init'd.
1912 */
1913 if (clk->parent)
1914 hlist_add_head(&clk->child_node,
1915 &clk->parent->children);
1916 else if (clk->flags & CLK_IS_ROOT)
1917 hlist_add_head(&clk->child_node, &clk_root_list);
1918 else
1919 hlist_add_head(&clk->child_node, &clk_orphan_list);
1920
1921 /*
Boris BREZILLON5279fc42013-12-21 10:34:47 +01001922 * Set clk's accuracy. The preferred method is to use
1923 * .recalc_accuracy. For simple clocks and lazy developers the default
1924 * fallback is to use the parent's accuracy. If a clock doesn't have a
1925 * parent (or is orphaned) then accuracy is set to zero (perfect
1926 * clock).
1927 */
1928 if (clk->ops->recalc_accuracy)
1929 clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
1930 __clk_get_accuracy(clk->parent));
1931 else if (clk->parent)
1932 clk->accuracy = clk->parent->accuracy;
1933 else
1934 clk->accuracy = 0;
1935
1936 /*
Maxime Ripard9824cf72014-07-14 13:53:27 +02001937 * Set clk's phase.
1938 * Since a phase is by definition relative to its parent, just
1939 * query the current clock phase, or just assume it's in phase.
1940 */
1941 if (clk->ops->get_phase)
1942 clk->phase = clk->ops->get_phase(clk->hw);
1943 else
1944 clk->phase = 0;
1945
1946 /*
Mike Turquetteb24764902012-03-15 23:11:19 -07001947 * Set clk's rate. The preferred method is to use .recalc_rate. For
1948 * simple clocks and lazy developers the default fallback is to use the
1949 * parent's rate. If a clock doesn't have a parent (or is orphaned)
1950 * then rate is set to zero.
1951 */
1952 if (clk->ops->recalc_rate)
1953 clk->rate = clk->ops->recalc_rate(clk->hw,
1954 __clk_get_rate(clk->parent));
1955 else if (clk->parent)
1956 clk->rate = clk->parent->rate;
1957 else
1958 clk->rate = 0;
1959
Stephen Boyd3a5aec22013-10-16 00:40:03 -07001960 clk_debug_register(clk);
Mike Turquetteb24764902012-03-15 23:11:19 -07001961 /*
1962 * walk the list of orphan clocks and reparent any that are children of
1963 * this clock
1964 */
Sasha Levinb67bfe02013-02-27 17:06:00 -08001965 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
Alex Elder12d298862013-09-05 08:33:24 -05001966 if (orphan->num_parents && orphan->ops->get_parent) {
Martin Fuzzey1f61e5f2012-11-22 20:15:05 +01001967 i = orphan->ops->get_parent(orphan->hw);
1968 if (!strcmp(clk->name, orphan->parent_names[i]))
1969 __clk_reparent(orphan, clk);
1970 continue;
1971 }
1972
Mike Turquetteb24764902012-03-15 23:11:19 -07001973 for (i = 0; i < orphan->num_parents; i++)
1974 if (!strcmp(clk->name, orphan->parent_names[i])) {
1975 __clk_reparent(orphan, clk);
1976 break;
1977 }
Martin Fuzzey1f61e5f2012-11-22 20:15:05 +01001978 }
Mike Turquetteb24764902012-03-15 23:11:19 -07001979
1980 /*
1981 * optional platform-specific magic
1982 *
1983 * The .init callback is not used by any of the basic clock types, but
1984 * exists for weird hardware that must perform initialization magic.
1985 * Please consider other ways of solving initialization problems before
Peter Meerwald24ee1a02013-06-29 15:14:19 +02001986 * using this callback, as its use is discouraged.
Mike Turquetteb24764902012-03-15 23:11:19 -07001987 */
1988 if (clk->ops->init)
1989 clk->ops->init(clk->hw);
1990
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02001991 kref_init(&clk->ref);
Mike Turquetteb24764902012-03-15 23:11:19 -07001992out:
Mike Turquetteeab89f62013-03-28 13:59:01 -07001993 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001994
Mike Turquetted1302a32012-03-29 14:30:40 -07001995 return ret;
Mike Turquetteb24764902012-03-15 23:11:19 -07001996}
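
/*
 * Editor's illustrative sketch, not part of this file: the smallest clk_ops
 * that satisfies the sanity checks in __clk_init() for a rate-changing
 * clock, i.e. .set_rate together with .recalc_rate and one of .round_rate
 * or .determine_rate. The foo_* names and the fixed /2 divider are made up.
 */
static unsigned long foo_recalc_rate(struct clk_hw *hw,
				     unsigned long parent_rate)
{
	return parent_rate / 2;
}

static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *parent_rate)
{
	/* only one rate is possible; ignore the request */
	return *parent_rate / 2;
}

static int foo_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	return (rate == parent_rate / 2) ? 0 : -EINVAL;
}

static const struct clk_ops foo_div2_ops = {
	.recalc_rate	= foo_recalc_rate,
	.round_rate	= foo_round_rate,
	.set_rate	= foo_set_rate,
};
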
1997
1998/**
Saravana Kannan0197b3e2012-04-25 22:58:56 -07001999 * __clk_register - register a clock and return a cookie.
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
2000 *
2001 * Same as clk_register, except that the .clk field inside hw shall point to a
2002 * preallocated (generally statically allocated) struct clk. None of the fields
2003 * of the struct clk need to be initialized.
2004 *
2005 * The data pointed to by the .init and .clk fields shall NOT be marked as init
2006 * data.
2007 *
2008 * __clk_register is only exposed via clk-private.h and is intended for use with
2009 * very large numbers of clocks that need to be statically initialized. It is
2010 * a layering violation to include clk-private.h from any code which implements
2011 * a clock's .ops; as such any statically initialized clock data MUST be in a
Peter Meerwald24ee1a02013-06-29 15:14:19 +02002012 * separate C file from the logic that implements its operations. Returns
Saravana Kannan0197b3e2012-04-25 22:58:56 -07002013 * the struct clk * on success, or an ERR_PTR()-encoded error on failure.
2014 */
2015struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
2016{
2017 int ret;
2018 struct clk *clk;
2019
2020 clk = hw->clk;
2021 clk->name = hw->init->name;
2022 clk->ops = hw->init->ops;
2023 clk->hw = hw;
2024 clk->flags = hw->init->flags;
2025 clk->parent_names = hw->init->parent_names;
2026 clk->num_parents = hw->init->num_parents;
Sylwester Nawrockiac2df522013-08-24 20:10:41 +02002027 if (dev && dev->driver)
2028 clk->owner = dev->driver->owner;
2029 else
2030 clk->owner = NULL;
Saravana Kannan0197b3e2012-04-25 22:58:56 -07002031
2032 ret = __clk_init(dev, clk);
2033 if (ret)
2034 return ERR_PTR(ret);
2035
2036 return clk;
2037}
2038EXPORT_SYMBOL_GPL(__clk_register);
2039
Stephen Boyd293ba3b2014-04-18 16:29:42 -07002040/**
2041 * clk_register - allocate a new clock, register it and return an opaque cookie
2042 * @dev: device that is registering this clock
2043 * @hw: link to hardware-specific clock data
2044 *
2045 * clk_register is the primary interface for populating the clock tree with new
2046 * clock nodes. It returns a pointer to the newly allocated struct clk which
2047 * cannot be dereferenced by driver code but may be used in conjunction with the
2048 * rest of the clock API. In the event of an error clk_register will return an
2049 * error code wrapped in an ERR_PTR(); drivers must test for this with IS_ERR().
2050 */
2051struct clk *clk_register(struct device *dev, struct clk_hw *hw)
Mike Turquetteb24764902012-03-15 23:11:19 -07002052{
Mike Turquetted1302a32012-03-29 14:30:40 -07002053 int i, ret;
Stephen Boyd293ba3b2014-04-18 16:29:42 -07002054 struct clk *clk;
2055
2056 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
2057 if (!clk) {
2058 pr_err("%s: could not allocate clk\n", __func__);
2059 ret = -ENOMEM;
2060 goto fail_out;
2061 }
Mike Turquetteb24764902012-03-15 23:11:19 -07002062
Saravana Kannan0197b3e2012-04-25 22:58:56 -07002063 clk->name = kstrdup(hw->init->name, GFP_KERNEL);
2064 if (!clk->name) {
2065 pr_err("%s: could not allocate clk->name\n", __func__);
2066 ret = -ENOMEM;
2067 goto fail_name;
2068 }
2069 clk->ops = hw->init->ops;
Sylwester Nawrockiac2df522013-08-24 20:10:41 +02002070 if (dev && dev->driver)
2071 clk->owner = dev->driver->owner;
Mike Turquetteb24764902012-03-15 23:11:19 -07002072 clk->hw = hw;
Saravana Kannan0197b3e2012-04-25 22:58:56 -07002073 clk->flags = hw->init->flags;
2074 clk->num_parents = hw->init->num_parents;
Mike Turquetteb24764902012-03-15 23:11:19 -07002075 hw->clk = clk;
2076
Mike Turquetted1302a32012-03-29 14:30:40 -07002077 /* allocate local copy in case parent_names is __initdata */
Tomasz Figa96a7ed92013-09-29 02:37:15 +02002078 clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
2079 GFP_KERNEL);
Mike Turquetteb24764902012-03-15 23:11:19 -07002080
Mike Turquetted1302a32012-03-29 14:30:40 -07002081 if (!clk->parent_names) {
2082 pr_err("%s: could not allocate clk->parent_names\n", __func__);
2083 ret = -ENOMEM;
2084 goto fail_parent_names;
2085 }
2086
2087
2088 /* copy each string name in case parent_names is __initdata */
Saravana Kannan0197b3e2012-04-25 22:58:56 -07002089 for (i = 0; i < clk->num_parents; i++) {
2090 clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
2091 GFP_KERNEL);
Mike Turquetted1302a32012-03-29 14:30:40 -07002092 if (!clk->parent_names[i]) {
2093 pr_err("%s: could not copy parent_names\n", __func__);
2094 ret = -ENOMEM;
2095 goto fail_parent_names_copy;
2096 }
2097 }
2098
2099 ret = __clk_init(dev, clk);
2100 if (!ret)
Stephen Boyd293ba3b2014-04-18 16:29:42 -07002101 return clk;
Mike Turquetted1302a32012-03-29 14:30:40 -07002102
2103fail_parent_names_copy:
2104 while (--i >= 0)
2105 kfree(clk->parent_names[i]);
2106 kfree(clk->parent_names);
2107fail_parent_names:
Saravana Kannan0197b3e2012-04-25 22:58:56 -07002108 kfree(clk->name);
2109fail_name:
Mike Turquetted1302a32012-03-29 14:30:40 -07002110 kfree(clk);
2111fail_out:
2112 return ERR_PTR(ret);
Mike Turquetteb24764902012-03-15 23:11:19 -07002113}
2114EXPORT_SYMBOL_GPL(clk_register);
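
/*
 * Editor's illustrative sketch, not part of this file: registering the
 * hypothetical div-by-2 clock from the earlier sketch. clk_register()
 * copies the name and parent_names strings, so the on-stack clk_init_data
 * is safe; foo_div2_ops is assumed to be visible in this scope.
 */
static struct clk *example_register_div2(struct device *dev)
{
	static const char *foo_parents[] = { "foo_parent" };
	struct clk_init_data init = {
		.name		= "foo_div2",
		.ops		= &foo_div2_ops,
		.parent_names	= foo_parents,
		.num_parents	= 1,
	};
	struct clk_hw *hw;
	struct clk *clk;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return ERR_PTR(-ENOMEM);

	hw->init = &init;

	clk = clk_register(dev, hw);
	if (IS_ERR(clk))
		kfree(hw);	/* on failure nothing references hw */

	return clk;
}
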
2115
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02002116/*
2117 * Free memory allocated for a clock.
2118 * Caller must hold prepare_lock.
2119 */
2120static void __clk_release(struct kref *ref)
2121{
2122 struct clk *clk = container_of(ref, struct clk, ref);
2123 int i = clk->num_parents;
2124
2125 kfree(clk->parents);
2126 while (--i >= 0)
2127 kfree(clk->parent_names[i]);
2128
2129 kfree(clk->parent_names);
2130 kfree(clk->name);
2131 kfree(clk);
2132}
2133
2134/*
2135 * Empty clk_ops for unregistered clocks. These are used temporarily
2136 * after clk_unregister() was called on a clock and until last clock
2137 * consumer calls clk_put() and the struct clk object is freed.
2138 */
2139static int clk_nodrv_prepare_enable(struct clk_hw *hw)
2140{
2141 return -ENXIO;
2142}
2143
2144static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
2145{
2146 WARN_ON_ONCE(1);
2147}
2148
2149static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
2150 unsigned long parent_rate)
2151{
2152 return -ENXIO;
2153}
2154
2155static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
2156{
2157 return -ENXIO;
2158}
2159
2160static const struct clk_ops clk_nodrv_ops = {
2161 .enable = clk_nodrv_prepare_enable,
2162 .disable = clk_nodrv_disable_unprepare,
2163 .prepare = clk_nodrv_prepare_enable,
2164 .unprepare = clk_nodrv_disable_unprepare,
2165 .set_rate = clk_nodrv_set_rate,
2166 .set_parent = clk_nodrv_set_parent,
2167};
2168
Mark Brown1df5c932012-04-18 09:07:12 +01002169/**
2170 * clk_unregister - unregister a currently registered clock
2171 * @clk: clock to unregister
Mark Brown1df5c932012-04-18 09:07:12 +01002172 */
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02002173void clk_unregister(struct clk *clk)
2174{
2175 unsigned long flags;
2176
2177 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
2178 return;
2179
2180 clk_prepare_lock();
2181
2182 if (clk->ops == &clk_nodrv_ops) {
2183 pr_err("%s: unregistered clock: %s\n", __func__, clk->name);
2184 goto out;
2185 }
2186 /*
2187 * Assign empty clock ops for consumers that might still hold
2188 * a reference to this clock.
2189 */
2190 flags = clk_enable_lock();
2191 clk->ops = &clk_nodrv_ops;
2192 clk_enable_unlock(flags);
2193
2194 if (!hlist_empty(&clk->children)) {
2195 struct clk *child;
Stephen Boyd874f2242014-04-18 16:29:43 -07002196 struct hlist_node *t;
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02002197
2198 /* Reparent all children to the orphan list. */
Stephen Boyd874f2242014-04-18 16:29:43 -07002199 hlist_for_each_entry_safe(child, t, &clk->children, child_node)
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02002200 clk_set_parent(child, NULL);
2201 }
2202
2203 clk_debug_unregister(clk);
2204
2205 hlist_del_init(&clk->child_node);
2206
2207 if (clk->prepare_count)
2208 pr_warn("%s: unregistering prepared clock: %s\n",
2209 __func__, clk->name);
2210
2211 kref_put(&clk->ref, __clk_release);
2212out:
2213 clk_prepare_unlock();
2214}
Mark Brown1df5c932012-04-18 09:07:12 +01002215EXPORT_SYMBOL_GPL(clk_unregister);
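
/*
 * Editor's illustrative sketch, not part of this file: the remove path of
 * a driver whose clock came from plain clk_register(). Consumers still
 * holding a reference are redirected to the clk_nodrv_ops stubs above
 * until their final clk_put(). struct example_priv is hypothetical;
 * assumes <linux/platform_device.h>.
 */
struct example_priv {
	struct clk *clk;	/* from clk_register() at probe time */
};

static int example_remove(struct platform_device *pdev)
{
	struct example_priv *priv = platform_get_drvdata(pdev);

	clk_unregister(priv->clk);
	return 0;
}
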
2216
Stephen Boyd46c87732012-09-24 13:38:04 -07002217static void devm_clk_release(struct device *dev, void *res)
2218{
Stephen Boyd293ba3b2014-04-18 16:29:42 -07002219 clk_unregister(*(struct clk **)res);
Stephen Boyd46c87732012-09-24 13:38:04 -07002220}
2221
2222/**
2223 * devm_clk_register - resource managed clk_register()
2224 * @dev: device that is registering this clock
2225 * @hw: link to hardware-specific clock data
2226 *
2227 * Managed clk_register(). Clocks returned from this function are
2228 * automatically clk_unregister()ed on driver detach. See clk_register() for
2229 * more information.
2230 */
2231struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
2232{
2233 struct clk *clk;
Stephen Boyd293ba3b2014-04-18 16:29:42 -07002234 struct clk **clkp;
Stephen Boyd46c87732012-09-24 13:38:04 -07002235
Stephen Boyd293ba3b2014-04-18 16:29:42 -07002236 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
2237 if (!clkp)
Stephen Boyd46c87732012-09-24 13:38:04 -07002238 return ERR_PTR(-ENOMEM);
2239
Stephen Boyd293ba3b2014-04-18 16:29:42 -07002240 clk = clk_register(dev, hw);
2241 if (!IS_ERR(clk)) {
2242 *clkp = clk;
2243 devres_add(dev, clkp);
Stephen Boyd46c87732012-09-24 13:38:04 -07002244 } else {
Stephen Boyd293ba3b2014-04-18 16:29:42 -07002245 devres_free(clkp);
Stephen Boyd46c87732012-09-24 13:38:04 -07002246 }
2247
2248 return clk;
2249}
2250EXPORT_SYMBOL_GPL(devm_clk_register);
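
/*
 * Editor's illustrative sketch, not part of this file: the devm variant
 * drops the explicit unregister; the clock goes away automatically on
 * driver detach. An empty clk_ops is legal for a simple pass-through
 * node, whose rate then falls back to the parent's rate in __clk_init().
 * All names are hypothetical; assumes <linux/platform_device.h>.
 */
static const struct clk_ops example_passthrough_ops;

static const char *example_parents[] = { "example_parent" };

static const struct clk_init_data example_init_data = {
	.name		= "example_devm_clk",
	.ops		= &example_passthrough_ops,
	.parent_names	= example_parents,
	.num_parents	= 1,
};

static int example_probe(struct platform_device *pdev)
{
	struct clk_hw *hw;
	struct clk *clk;

	hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return -ENOMEM;

	hw->init = &example_init_data;

	clk = devm_clk_register(&pdev->dev, hw);
	return PTR_ERR_OR_ZERO(clk);
}
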
2251
2252static int devm_clk_match(struct device *dev, void *res, void *data)
2253{
2254 struct clk *c = res;
2255 if (WARN_ON(!c))
2256 return 0;
2257 return c == data;
2258}
2259
2260/**
2261 * devm_clk_unregister - resource managed clk_unregister()
2262 * @clk: clock to unregister
2263 *
2264 * Deallocate a clock allocated with devm_clk_register(). Normally
2265 * this function will not need to be called and the resource management
2266 * code will ensure that the resource is freed.
2267 */
2268void devm_clk_unregister(struct device *dev, struct clk *clk)
2269{
2270 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
2271}
2272EXPORT_SYMBOL_GPL(devm_clk_unregister);
2273
Sylwester Nawrockiac2df522013-08-24 20:10:41 +02002274/*
2275 * clkdev helpers
2276 */
2277int __clk_get(struct clk *clk)
2278{
Sylwester Nawrocki00efcb12014-01-07 13:03:43 +01002279 if (clk) {
2280 if (!try_module_get(clk->owner))
2281 return 0;
Sylwester Nawrockiac2df522013-08-24 20:10:41 +02002282
Sylwester Nawrocki00efcb12014-01-07 13:03:43 +01002283 kref_get(&clk->ref);
2284 }
Sylwester Nawrockiac2df522013-08-24 20:10:41 +02002285 return 1;
2286}
2287
2288void __clk_put(struct clk *clk)
2289{
Sylwester Nawrocki00efcb12014-01-07 13:03:43 +01002290 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
Sylwester Nawrockiac2df522013-08-24 20:10:41 +02002291 return;
2292
Sylwester Nawrockifcb0ee62013-08-24 15:00:10 +02002293 clk_prepare_lock();
2294 kref_put(&clk->ref, __clk_release);
2295 clk_prepare_unlock();
2296
Sylwester Nawrocki00efcb12014-01-07 13:03:43 +01002297 module_put(clk->owner);
Sylwester Nawrockiac2df522013-08-24 20:10:41 +02002298}
2299
Mike Turquetteb24764902012-03-15 23:11:19 -07002300/*** clk rate change notifiers ***/
2301
2302/**
2303 * clk_notifier_register - add a clk rate change notifier
2304 * @clk: struct clk * to watch
2305 * @nb: struct notifier_block * with callback info
2306 *
2307 * Request notification when clk's rate changes. This uses an SRCU
2308 * notifier because we want it to block and notifier unregistrations are
2309 * uncommon. The callbacks associated with the notifier must not
2310 * re-enter into the clk framework by calling any top-level clk APIs;
2311 * this would cause nested locking of the prepare_lock mutex.
2312 *
Soren Brinkmann5324fda2014-01-22 11:48:37 -08002313 * In all notification cases (pre, post and abort rate change) the
2314 * original clock rate is passed to the callback via struct
2315 * clk_notifier_data.old_rate and the new frequency is passed via struct
Mike Turquetteb24764902012-03-15 23:11:19 -07002316 * clk_notifier_data.new_rate.
2317 *
Mike Turquetteb24764902012-03-15 23:11:19 -07002318 * clk_notifier_register() must be called from non-atomic context.
2319 * Returns -EINVAL if called with null arguments, -ENOMEM upon
2320 * allocation failure; otherwise, passes along the return value of
2321 * srcu_notifier_chain_register().
2322 */
2323int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
2324{
2325 struct clk_notifier *cn;
2326 int ret = -ENOMEM;
2327
2328 if (!clk || !nb)
2329 return -EINVAL;
2330
Mike Turquetteeab89f62013-03-28 13:59:01 -07002331 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07002332
2333 /* search the list of notifiers for this clk */
2334 list_for_each_entry(cn, &clk_notifier_list, node)
2335 if (cn->clk == clk)
2336 break;
2337
2338 /* if clk wasn't in the notifier list, allocate new clk_notifier */
2339 if (cn->clk != clk) {
2340 cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
2341 if (!cn)
2342 goto out;
2343
2344 cn->clk = clk;
2345 srcu_init_notifier_head(&cn->notifier_head);
2346
2347 list_add(&cn->node, &clk_notifier_list);
2348 }
2349
2350 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
2351
2352 clk->notifier_count++;
2353
2354out:
Mike Turquetteeab89f62013-03-28 13:59:01 -07002355 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07002356
2357 return ret;
2358}
2359EXPORT_SYMBOL_GPL(clk_notifier_register);
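
/*
 * Editor's illustrative sketch, not part of this file: a notifier callback
 * that vetoes rates above a made-up 200 MHz limit and logs completed
 * changes. Returning NOTIFY_BAD sets NOTIFY_STOP_MASK, which the
 * propagation code above treats as an abort.
 */
static int example_clk_notifier_cb(struct notifier_block *nb,
				   unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		if (ndata->new_rate > 200000000)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
		pr_debug("rate changed: %lu -> %lu\n",
			 ndata->old_rate, ndata->new_rate);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block example_clk_nb = {
	.notifier_call = example_clk_notifier_cb,
};
/* paired with: clk_notifier_register(clk, &example_clk_nb); */
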
2360
2361/**
2362 * clk_notifier_unregister - remove a clk rate change notifier
2363 * @clk: struct clk *
2364 * @nb: struct notifier_block * with callback info
2365 *
2366 * Request no further notification for changes to 'clk' and frees memory
2367 * allocated in clk_notifier_register.
2368 *
2369 * Returns -EINVAL if called with null arguments; otherwise, passes
2370 * along the return value of srcu_notifier_chain_unregister().
2371 */
2372int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
2373{
2374 struct clk_notifier *cn = NULL;
2375 int ret = -EINVAL;
2376
2377 if (!clk || !nb)
2378 return -EINVAL;
2379
Mike Turquetteeab89f62013-03-28 13:59:01 -07002380 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07002381
2382 list_for_each_entry(cn, &clk_notifier_list, node)
2383 if (cn->clk == clk)
2384 break;
2385
2386 if (cn->clk == clk) {
2387 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
2388
2389 clk->notifier_count--;
2390
2391 /* XXX the notifier code should handle this better */
2392 if (!cn->notifier_head.head) {
2393 srcu_cleanup_notifier_head(&cn->notifier_head);
Lai Jiangshan72b53222013-06-03 17:17:15 +08002394 list_del(&cn->node);
Mike Turquetteb24764902012-03-15 23:11:19 -07002395 kfree(cn);
2396 }
2397
2398 } else {
2399 ret = -ENOENT;
2400 }
2401
Mike Turquetteeab89f62013-03-28 13:59:01 -07002402 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07002403
2404 return ret;
2405}
2406EXPORT_SYMBOL_GPL(clk_notifier_unregister);
Grant Likely766e6a42012-04-09 14:50:06 -05002407
2408#ifdef CONFIG_OF
2409/**
2410 * struct of_clk_provider - Clock provider registration structure
2411 * @link: Entry in global list of clock providers
2412 * @node: Pointer to device tree node of clock provider
2413 * @get: Get clock callback. Returns NULL or a struct clk for the
2414 * given clock specifier
2415 * @data: context pointer to be passed into @get callback
2416 */
2417struct of_clk_provider {
2418 struct list_head link;
2419
2420 struct device_node *node;
2421 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
2422 void *data;
2423};
2424
Prashant Gaikwadf2f6c252013-01-04 12:30:52 +05302425static const struct of_device_id __clk_of_table_sentinel
2426 __used __section(__clk_of_table_end);
2427
Grant Likely766e6a42012-04-09 14:50:06 -05002428static LIST_HEAD(of_clk_providers);
Sylwester Nawrockid6782c22013-08-23 17:03:43 +02002429static DEFINE_MUTEX(of_clk_mutex);
2430
2431/* of_clk_provider list locking helpers */
2432void of_clk_lock(void)
2433{
2434 mutex_lock(&of_clk_mutex);
2435}
2436
2437void of_clk_unlock(void)
2438{
2439 mutex_unlock(&of_clk_mutex);
2440}
Grant Likely766e6a42012-04-09 14:50:06 -05002441
2442struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
2443 void *data)
2444{
2445 return data;
2446}
2447EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
2448
Shawn Guo494bfec2012-08-22 21:36:27 +08002449struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
2450{
2451 struct clk_onecell_data *clk_data = data;
2452 unsigned int idx = clkspec->args[0];
2453
2454 if (idx >= clk_data->clk_num) {
2455 pr_err("%s: invalid clock index %d\n", __func__, idx);
2456 return ERR_PTR(-EINVAL);
2457 }
2458
2459 return clk_data->clks[idx];
2460}
2461EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
2462
Grant Likely766e6a42012-04-09 14:50:06 -05002463/**
2464 * of_clk_add_provider() - Register a clock provider for a node
2465 * @np: Device node pointer associated with clock provider
2466 * @clk_src_get: callback for decoding clock
2467 * @data: context pointer for @clk_src_get callback.
2468 */
2469int of_clk_add_provider(struct device_node *np,
2470 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
2471 void *data),
2472 void *data)
2473{
2474 struct of_clk_provider *cp;
Sylwester Nawrocki86be4082014-06-18 17:29:32 +02002475 int ret;
Grant Likely766e6a42012-04-09 14:50:06 -05002476
2477 cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
2478 if (!cp)
2479 return -ENOMEM;
2480
2481 cp->node = of_node_get(np);
2482 cp->data = data;
2483 cp->get = clk_src_get;
2484
Sylwester Nawrockid6782c22013-08-23 17:03:43 +02002485 mutex_lock(&of_clk_mutex);
Grant Likely766e6a42012-04-09 14:50:06 -05002486 list_add(&cp->link, &of_clk_providers);
Sylwester Nawrockid6782c22013-08-23 17:03:43 +02002487 mutex_unlock(&of_clk_mutex);
Grant Likely766e6a42012-04-09 14:50:06 -05002488 pr_debug("Added clock from %s\n", np->full_name);
2489
Sylwester Nawrocki86be4082014-06-18 17:29:32 +02002490 ret = of_clk_set_defaults(np, true);
2491 if (ret < 0)
2492 of_clk_del_provider(np);
2493
2494 return ret;
Grant Likely766e6a42012-04-09 14:50:06 -05002495}
2496EXPORT_SYMBOL_GPL(of_clk_add_provider);
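
/*
 * Editor's illustrative sketch, not part of this file: exposing several
 * clocks through one device tree node with of_clk_src_onecell_get() from
 * above; consumers then reference them by index in their "clocks"
 * phandle. NR_EXAMPLE_CLKS and the array contents are made up.
 */
#define NR_EXAMPLE_CLKS	4

static struct clk *example_clks[NR_EXAMPLE_CLKS];

static struct clk_onecell_data example_clk_data = {
	.clks		= example_clks,
	.clk_num	= NR_EXAMPLE_CLKS,
};

static void __init example_clks_add_provider(struct device_node *np)
{
	/* example_clks[] is assumed to be filled by clk_register() calls */
	if (of_clk_add_provider(np, of_clk_src_onecell_get, &example_clk_data))
		pr_err("%s: failed to add clock provider\n", __func__);
}
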
2497
2498/**
2499 * of_clk_del_provider() - Remove a previously registered clock provider
2500 * @np: Device node pointer associated with clock provider
2501 */
2502void of_clk_del_provider(struct device_node *np)
2503{
2504 struct of_clk_provider *cp;
2505
Sylwester Nawrockid6782c22013-08-23 17:03:43 +02002506 mutex_lock(&of_clk_mutex);
Grant Likely766e6a42012-04-09 14:50:06 -05002507 list_for_each_entry(cp, &of_clk_providers, link) {
2508 if (cp->node == np) {
2509 list_del(&cp->link);
2510 of_node_put(cp->node);
2511 kfree(cp);
2512 break;
2513 }
2514 }
Sylwester Nawrockid6782c22013-08-23 17:03:43 +02002515 mutex_unlock(&of_clk_mutex);
Grant Likely766e6a42012-04-09 14:50:06 -05002516}
2517EXPORT_SYMBOL_GPL(of_clk_del_provider);
2518
Sylwester Nawrockid6782c22013-08-23 17:03:43 +02002519struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
Grant Likely766e6a42012-04-09 14:50:06 -05002520{
2521 struct of_clk_provider *provider;
Jean-Francois Moinea34cd462013-11-25 19:47:04 +01002522 struct clk *clk = ERR_PTR(-EPROBE_DEFER);
Grant Likely766e6a42012-04-09 14:50:06 -05002523
2524 /* Check if we have such a provider in our array */
Grant Likely766e6a42012-04-09 14:50:06 -05002525 list_for_each_entry(provider, &of_clk_providers, link) {
2526 if (provider->node == clkspec->np)
2527 clk = provider->get(clkspec, provider->data);
2528 if (!IS_ERR(clk))
2529 break;
2530 }
Sylwester Nawrockid6782c22013-08-23 17:03:43 +02002531
2532 return clk;
2533}
2534
2535struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
2536{
2537 struct clk *clk;
2538
2539 mutex_lock(&of_clk_mutex);
2540 clk = __of_clk_get_from_provider(clkspec);
2541 mutex_unlock(&of_clk_mutex);
Grant Likely766e6a42012-04-09 14:50:06 -05002542
2543 return clk;
2544}
2545
Mike Turquettef6102742013-10-07 23:12:13 -07002546int of_clk_get_parent_count(struct device_node *np)
2547{
2548 return of_count_phandle_with_args(np, "clocks", "#clock-cells");
2549}
2550EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
2551
Grant Likely766e6a42012-04-09 14:50:06 -05002552const char *of_clk_get_parent_name(struct device_node *np, int index)
2553{
2554 struct of_phandle_args clkspec;
Ben Dooks7a0fc1a2014-02-13 18:02:49 +00002555 struct property *prop;
Grant Likely766e6a42012-04-09 14:50:06 -05002556 const char *clk_name;
Ben Dooks7a0fc1a2014-02-13 18:02:49 +00002557 const __be32 *vp;
2558 u32 pv;
Grant Likely766e6a42012-04-09 14:50:06 -05002559 int rc;
Ben Dooks7a0fc1a2014-02-13 18:02:49 +00002560 int count;
Grant Likely766e6a42012-04-09 14:50:06 -05002561
2562 if (index < 0)
2563 return NULL;
2564
2565 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
2566 &clkspec);
2567 if (rc)
2568 return NULL;
2569
Ben Dooks7a0fc1a2014-02-13 18:02:49 +00002570 index = clkspec.args_count ? clkspec.args[0] : 0;
2571 count = 0;
2572
2573 /* if there is a clock-indices property, use it to transfer the index
2574 * specified into an array offset for the clock-output-names property.
2575 */
2576 of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
2577 if (index == pv) {
2578 index = count;
2579 break;
2580 }
2581 count++;
2582 }
2583
Grant Likely766e6a42012-04-09 14:50:06 -05002584 if (of_property_read_string_index(clkspec.np, "clock-output-names",
Ben Dooks7a0fc1a2014-02-13 18:02:49 +00002585 index,
Grant Likely766e6a42012-04-09 14:50:06 -05002586 &clk_name) < 0)
2587 clk_name = clkspec.np->name;
2588
2589 of_node_put(clkspec.np);
2590 return clk_name;
2591}
2592EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
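
/*
 * Editor's illustrative sketch, not part of this file: building a
 * parent_names[] array for clock registration from a node's "clocks"
 * phandles, using the two helpers above. The caller is assumed to free
 * the returned array with kfree().
 */
static const char **example_get_parent_names(struct device_node *np,
					     int *num_parents)
{
	const char **parents;
	int i, count;

	count = of_clk_get_parent_count(np);
	if (count <= 0)
		return NULL;

	parents = kcalloc(count, sizeof(*parents), GFP_KERNEL);
	if (!parents)
		return NULL;

	for (i = 0; i < count; i++)
		parents[i] = of_clk_get_parent_name(np, i);

	*num_parents = count;
	return parents;
}
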
2593
Gregory CLEMENT1771b102014-02-24 19:10:13 +01002594struct clock_provider {
2595 of_clk_init_cb_t clk_init_cb;
2596 struct device_node *np;
2597 struct list_head node;
2598};
2599
2600static LIST_HEAD(clk_provider_list);
2601
2602/*
2603 * This function looks at each parent clock of a node and checks that
2604 * the provider for that parent clock has been initialized; only then
2605 * is that parent clock ready.
2606 */
2607static int parent_ready(struct device_node *np)
2608{
2609 int i = 0;
2610
2611 while (true) {
2612 struct clk *clk = of_clk_get(np, i);
2613
2614 /* this parent is ready we can check the next one */
2615 if (!IS_ERR(clk)) {
2616 clk_put(clk);
2617 i++;
2618 continue;
2619 }
2620
2621 /* at least one parent is not ready, we exit now */
2622 if (PTR_ERR(clk) == -EPROBE_DEFER)
2623 return 0;
2624
2625 /*
2626 * Here we make the assumption that the device tree is
2627 * written correctly, so any other error means that
2628 * there is no more parent. As we didn't exit yet, the
2629 * previous parents are all ready. If there is no
2630 * clock parent at all, there is nothing to wait for,
2631 * so we can consider its absence as being ready.
2632 */
2633 return 1;
2634 }
2635}
2636
Grant Likely766e6a42012-04-09 14:50:06 -05002637/**
2638 * of_clk_init() - Scan and init clock providers from the DT
2639 * @matches: array of compatible values and init functions for providers.
2640 *
Gregory CLEMENT1771b102014-02-24 19:10:13 +01002641 * This function scans the device tree for matching clock providers
Sylwester Nawrockie5ca8fb2014-03-27 12:08:36 +01002642 * and calls their initialization functions, trying to follow the
Gregory CLEMENT1771b102014-02-24 19:10:13 +01002643 * dependencies between the providers.
Grant Likely766e6a42012-04-09 14:50:06 -05002644 */
2645void __init of_clk_init(const struct of_device_id *matches)
2646{
Alex Elder7f7ed582013-08-22 11:31:31 -05002647 const struct of_device_id *match;
Grant Likely766e6a42012-04-09 14:50:06 -05002648 struct device_node *np;
Gregory CLEMENT1771b102014-02-24 19:10:13 +01002649 struct clock_provider *clk_provider, *next;
2650 bool is_init_done;
2651 bool force = false;
Grant Likely766e6a42012-04-09 14:50:06 -05002652
Prashant Gaikwadf2f6c252013-01-04 12:30:52 +05302653 if (!matches)
Tero Kristo819b4862013-10-22 11:39:36 +03002654 matches = &__clk_of_table;
Prashant Gaikwadf2f6c252013-01-04 12:30:52 +05302655
Gregory CLEMENT1771b102014-02-24 19:10:13 +01002656 /* First prepare the list of the clocks providers */
Alex Elder7f7ed582013-08-22 11:31:31 -05002657 for_each_matching_node_and_match(np, matches, &match) {
Gregory CLEMENT1771b102014-02-24 19:10:13 +01002658 struct clock_provider *parent =
2659 kzalloc(sizeof(struct clock_provider), GFP_KERNEL);
2660
2661 parent->clk_init_cb = match->data;
2662 parent->np = np;
Sylwester Nawrocki3f6d4392014-03-27 11:43:32 +01002663 list_add_tail(&parent->node, &clk_provider_list);
Gregory CLEMENT1771b102014-02-24 19:10:13 +01002664 }
2665
2666 while (!list_empty(&clk_provider_list)) {
2667 is_init_done = false;
2668 list_for_each_entry_safe(clk_provider, next,
2669 &clk_provider_list, node) {
2670 if (force || parent_ready(clk_provider->np)) {
Sylwester Nawrocki86be4082014-06-18 17:29:32 +02002671
Gregory CLEMENT1771b102014-02-24 19:10:13 +01002672 clk_provider->clk_init_cb(clk_provider->np);
Sylwester Nawrocki86be4082014-06-18 17:29:32 +02002673 of_clk_set_defaults(clk_provider->np, true);
2674
Gregory CLEMENT1771b102014-02-24 19:10:13 +01002675 list_del(&clk_provider->node);
2676 kfree(clk_provider);
2677 is_init_done = true;
2678 }
2679 }
2680
2681 /*
Sylwester Nawrockie5ca8fb2014-03-27 12:08:36 +01002682 * We didn't manage to initialize any of the
Gregory CLEMENT1771b102014-02-24 19:10:13 +01002683 * remaining providers during the last loop, so now we
2684 * initialize all the remaining ones unconditionally
2685 * in case a clock parent was not mandatory.
2686 */
2687 if (!is_init_done)
2688 force = true;
Grant Likely766e6a42012-04-09 14:50:06 -05002689 }
2690}
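
/*
 * Editor's illustrative sketch, not part of this file: a provider hooks
 * itself into the __clk_of_table scanned above by using CLK_OF_DECLARE()
 * from <linux/clk-provider.h>. The compatible string and setup function
 * are hypothetical.
 */
static void __init example_of_clk_setup(struct device_node *np)
{
	/* register clocks, then call of_clk_add_provider(np, ...) */
}
CLK_OF_DECLARE(example_clk, "vendor,example-clk", example_of_clk_setup);
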
2691#endif