/* arch/arm/mach-msm/clock.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <trace/events/power.h>

#include "clock.h"

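/*
 * This file implements the standard clk API (include/linux/clk.h) for
 * mach-msm. Each struct clk delegates to its clk_ops and, optionally,
 * votes for a voltage level on a shared clk_vdd_class whenever it is
 * enabled or changes rate.
 */
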
/* Find the voltage level required for a given rate. */
static int find_vdd_level(struct clk *clk, unsigned long rate)
{
        int level;

        for (level = 0; level < ARRAY_SIZE(clk->fmax); level++)
                if (rate <= clk->fmax[level])
                        break;

        if (level == ARRAY_SIZE(clk->fmax)) {
                pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
                        clk->dbg_name);
                return -EINVAL;
        }

        return level;
}
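
/*
 * For example, with fmax = { 100000000, 200000000, 300000000 } (values
 * chosen here purely for illustration), a rate of 150000000 returns
 * level 1: the lowest level whose Fmax satisfies the requested rate.
 * Rates above the highest Fmax fail with -EINVAL.
 */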

/* Update voltage level given the current votes. */
static int update_vdd(struct clk_vdd_class *vdd_class)
{
        int level, rc;

        for (level = ARRAY_SIZE(vdd_class->level_votes)-1; level > 0; level--)
                if (vdd_class->level_votes[level])
                        break;

        if (level == vdd_class->cur_level)
                return 0;

        rc = vdd_class->set_vdd(vdd_class, level);
        if (!rc)
                vdd_class->cur_level = level;

        return rc;
}

/* Vote for a voltage level. */
int vote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&vdd_class->lock, flags);
        vdd_class->level_votes[level]++;
        rc = update_vdd(vdd_class);
        if (rc)
                vdd_class->level_votes[level]--;
        spin_unlock_irqrestore(&vdd_class->lock, flags);

        return rc;
}

/* Remove vote for a voltage level. */
int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&vdd_class->lock, flags);
        if (WARN(!vdd_class->level_votes[level],
                        "Reference counts are incorrect for %s level %d\n",
                        vdd_class->class_name, level))
                goto out;
        vdd_class->level_votes[level]--;
        rc = update_vdd(vdd_class);
        if (rc)
                vdd_class->level_votes[level]++;
out:
        spin_unlock_irqrestore(&vdd_class->lock, flags);
        return rc;
}
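
/*
 * Typical (hypothetical) usage from a clock implementation: take a vote
 * for the level a new rate requires before switching, and drop the vote
 * for the old rate's level afterwards, so the rail never falls below
 * what either rate needs. vote_rate_vdd()/unvote_rate_vdd() below wrap
 * this pattern for a clock's own vdd_class.
 */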

/* Vote for a voltage level corresponding to a clock's rate. */
static int vote_rate_vdd(struct clk *clk, unsigned long rate)
{
        int level;

        if (!clk->vdd_class)
                return 0;

        level = find_vdd_level(clk, rate);
        if (level < 0)
                return level;

        return vote_vdd_level(clk->vdd_class, level);
}

/* Remove vote for a voltage level corresponding to a clock's rate. */
static void unvote_rate_vdd(struct clk *clk, unsigned long rate)
{
        int level;

        if (!clk->vdd_class)
                return;

        level = find_vdd_level(clk, rate);
        if (level < 0)
                return;

        unvote_vdd_level(clk->vdd_class, level);
}

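/*
 * clk_prepare() does the sleepable part of enabling a clock: it takes
 * the prepare mutex, recursively prepares the parent and any "depends"
 * clock, and calls the clock's own ->prepare() op on the 0 -> 1
 * transition of prepare_count. It must not be called from atomic
 * context; clk_enable() handles the atomic part.
 */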
int clk_prepare(struct clk *clk)
{
        int ret = 0;
        struct clk *parent;

        if (!clk)
                return 0;
        if (IS_ERR(clk))
                return -EINVAL;

        mutex_lock(&clk->prepare_lock);
        if (clk->prepare_count == 0) {
                parent = clk_get_parent(clk);

                ret = clk_prepare(parent);
                if (ret)
                        goto out;
                ret = clk_prepare(clk->depends);
                if (ret)
                        goto err_prepare_depends;

                if (clk->ops->prepare)
                        ret = clk->ops->prepare(clk);
                if (ret)
                        goto err_prepare_clock;
        }
        clk->prepare_count++;
out:
        mutex_unlock(&clk->prepare_lock);
        return ret;
err_prepare_clock:
        clk_unprepare(clk->depends);
err_prepare_depends:
        clk_unprepare(parent);
        goto out;
}
EXPORT_SYMBOL(clk_prepare);

/*
 * Standard clock functions defined in include/linux/clk.h
 */
int clk_enable(struct clk *clk)
{
        int ret = 0;
        unsigned long flags;
        struct clk *parent;

        if (!clk)
                return 0;
        if (IS_ERR(clk))
                return -EINVAL;

        spin_lock_irqsave(&clk->lock, flags);
        if (WARN(!clk->warned && !clk->prepare_count,
                        "%s: Don't call enable on unprepared clocks\n",
                        clk->dbg_name))
                clk->warned = true;
        if (clk->count == 0) {
                parent = clk_get_parent(clk);

                ret = clk_enable(parent);
                if (ret)
                        goto err_enable_parent;
                ret = clk_enable(clk->depends);
                if (ret)
                        goto err_enable_depends;

                ret = vote_rate_vdd(clk, clk->rate);
                if (ret)
                        goto err_vote_vdd;
                trace_clock_enable(clk->dbg_name, 1, smp_processor_id());
                if (clk->ops->enable)
                        ret = clk->ops->enable(clk);
                if (ret)
                        goto err_enable_clock;
        } else if (clk->flags & CLKFLAG_HANDOFF_RATE) {
                /*
                 * The clock was already enabled by handoff code so there is no
                 * need to enable it again here. Clearing the handoff flag will
                 * prevent the lateinit handoff code from disabling the clock if
                 * a client driver still has it enabled.
                 */
                clk->flags &= ~CLKFLAG_HANDOFF_RATE;
                goto out;
        }
        clk->count++;
out:
        spin_unlock_irqrestore(&clk->lock, flags);

        return 0;

err_enable_clock:
        unvote_rate_vdd(clk, clk->rate);
err_vote_vdd:
        clk_disable(clk->depends);
err_enable_depends:
        clk_disable(parent);
err_enable_parent:
        spin_unlock_irqrestore(&clk->lock, flags);
        return ret;
}
EXPORT_SYMBOL(clk_enable);
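
/*
 * A hypothetical consumer pairs these calls as follows (clk_get(),
 * clk_put(), and the "core_clk" name are illustrative, not defined in
 * this file):
 *
 *      struct clk *c = clk_get(dev, "core_clk");
 *      int ret = clk_prepare(c);               // may sleep
 *
 *      if (!ret)
 *              ret = clk_enable(c);            // safe in atomic context
 *      ...
 *      clk_disable(c);
 *      clk_unprepare(c);
 *      clk_put(c);
 */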

void clk_disable(struct clk *clk)
{
        unsigned long flags;

        if (IS_ERR_OR_NULL(clk))
                return;

        spin_lock_irqsave(&clk->lock, flags);
        if (WARN(!clk->warned && !clk->prepare_count,
                        "%s: Never called prepare or calling disable after unprepare\n",
                        clk->dbg_name))
                clk->warned = true;
        if (WARN(clk->count == 0, "%s is unbalanced", clk->dbg_name))
                goto out;
        if (clk->count == 1) {
                struct clk *parent = clk_get_parent(clk);

                trace_clock_disable(clk->dbg_name, 0, smp_processor_id());
                if (clk->ops->disable)
                        clk->ops->disable(clk);
                unvote_rate_vdd(clk, clk->rate);
                clk_disable(clk->depends);
                clk_disable(parent);
        }
        clk->count--;
out:
        spin_unlock_irqrestore(&clk->lock, flags);
}
EXPORT_SYMBOL(clk_disable);

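/*
 * clk_unprepare() undoes clk_prepare(): on the 1 -> 0 transition of
 * prepare_count it calls the clock's ->unprepare() op and releases the
 * parent and "depends" clocks. Calling it while the clock is still
 * enabled, or without a matching prepare, triggers a one-time WARN.
 */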
void clk_unprepare(struct clk *clk)
{
        if (IS_ERR_OR_NULL(clk))
                return;

        mutex_lock(&clk->prepare_lock);
        if (!clk->prepare_count) {
                if (WARN(!clk->warned, "%s is unbalanced (prepare)",
                                clk->dbg_name))
                        clk->warned = true;
                goto out;
        }
        if (clk->prepare_count == 1) {
                struct clk *parent = clk_get_parent(clk);

                if (WARN(!clk->warned && clk->count,
                                "%s: Don't call unprepare when the clock is enabled\n",
                                clk->dbg_name))
                        clk->warned = true;

                if (clk->ops->unprepare)
                        clk->ops->unprepare(clk);
                clk_unprepare(clk->depends);
                clk_unprepare(parent);
        }
        clk->prepare_count--;
out:
        mutex_unlock(&clk->prepare_lock);
}
EXPORT_SYMBOL(clk_unprepare);

int clk_reset(struct clk *clk, enum clk_reset_action action)
{
        if (IS_ERR_OR_NULL(clk))
                return -EINVAL;

        if (!clk->ops->reset)
                return -ENOSYS;

        return clk->ops->reset(clk, action);
}
EXPORT_SYMBOL(clk_reset);

unsigned long clk_get_rate(struct clk *clk)
{
        if (IS_ERR_OR_NULL(clk))
                return 0;

        if (!clk->ops->get_rate)
                return clk->rate;

        return clk->ops->get_rate(clk);
}
EXPORT_SYMBOL(clk_get_rate);

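/*
 * clk_set_rate() changes a clock's rate. If the clock is currently
 * enabled, the vdd vote for the new rate is taken before the switch and
 * the vote for the old rate is released only after it succeeds, so the
 * voltage rail satisfies both rates throughout the transition.
 */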
int clk_set_rate(struct clk *clk, unsigned long rate)
{
        unsigned long start_rate, flags;
        int rc;

        if (IS_ERR_OR_NULL(clk))
                return -EINVAL;

        if (!clk->ops->set_rate)
                return -ENOSYS;

        spin_lock_irqsave(&clk->lock, flags);
        trace_clock_set_rate(clk->dbg_name, rate, smp_processor_id());
        if (clk->count) {
                start_rate = clk->rate;
                /* Enforce vdd requirements for target frequency. */
                rc = vote_rate_vdd(clk, rate);
                if (rc)
                        goto err_vote_vdd;
                rc = clk->ops->set_rate(clk, rate);
                if (rc)
                        goto err_set_rate;
                /* Release vdd requirements for starting frequency. */
                unvote_rate_vdd(clk, start_rate);
        } else {
                rc = clk->ops->set_rate(clk, rate);
        }

        if (!rc)
                clk->rate = rate;

        spin_unlock_irqrestore(&clk->lock, flags);
        return rc;

err_set_rate:
        unvote_rate_vdd(clk, rate);
err_vote_vdd:
        spin_unlock_irqrestore(&clk->lock, flags);
        return rc;
}
EXPORT_SYMBOL(clk_set_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
        if (IS_ERR_OR_NULL(clk))
                return -EINVAL;

        if (!clk->ops->round_rate)
                return -ENOSYS;

        return clk->ops->round_rate(clk, rate);
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
        if (IS_ERR_OR_NULL(clk))
                return -EINVAL;

        if (!clk->ops->set_max_rate)
                return -ENOSYS;

        return clk->ops->set_max_rate(clk, rate);
}
EXPORT_SYMBOL(clk_set_max_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        if (IS_ERR_OR_NULL(clk))
                return -EINVAL;

        if (!clk->ops->set_parent)
                return 0;

        return clk->ops->set_parent(clk, parent);
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
        if (IS_ERR_OR_NULL(clk))
                return NULL;

        if (!clk->ops->get_parent)
                return NULL;

        return clk->ops->get_parent(clk);
}
EXPORT_SYMBOL(clk_get_parent);

int clk_set_flags(struct clk *clk, unsigned long flags)
{
        if (IS_ERR_OR_NULL(clk))
                return -EINVAL;
        if (!clk->ops->set_flags)
                return -ENOSYS;

        return clk->ops->set_flags(clk, flags);
}
EXPORT_SYMBOL(clk_set_flags);

static struct clock_init_data __initdata *clk_init_data;

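/*
 * msm_clock_init() registers the board's clock table: it runs the
 * optional pre_init hook, links each clock onto its parent's children
 * list, hands off clocks the bootloader left running (keeping them
 * enabled until clock_late_init()), adds the clkdev lookup table, and
 * finally runs the optional post_init hook.
 */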
void __init msm_clock_init(struct clock_init_data *data)
{
        unsigned n;
        struct clk_lookup *clock_tbl;
        size_t num_clocks;
        struct clk *clk;

        clk_init_data = data;
        if (clk_init_data->pre_init)
                clk_init_data->pre_init();

        clock_tbl = data->table;
        num_clocks = data->size;

        for (n = 0; n < num_clocks; n++) {
                struct clk *parent;
                clk = clock_tbl[n].clk;
                parent = clk_get_parent(clk);
                if (parent && list_empty(&clk->siblings))
                        list_add(&clk->siblings, &parent->children);
        }

        /*
         * Detect and preserve initial clock state until clock_late_init() or
         * a driver explicitly changes it, whichever is first.
         */
        for (n = 0; n < num_clocks; n++) {
                clk = clock_tbl[n].clk;
                if (clk->ops->handoff && !(clk->flags & CLKFLAG_HANDOFF_RATE) &&
                    (clk->ops->handoff(clk) == HANDOFF_ENABLED_CLK)) {
                        clk->flags |= CLKFLAG_HANDOFF_RATE;
                        clk_prepare_enable(clk);
                }
        }

        clkdev_add_table(clock_tbl, num_clocks);

        if (clk_init_data->post_init)
                clk_init_data->post_init();
}
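
/*
 * A board file might hook into this roughly as follows; the table
 * entries, the msm9999_* names, and the CLK_LOOKUP() helper are
 * illustrative assumptions, not definitions from this file:
 *
 *      static struct clk_lookup msm_clocks[] = {
 *              CLK_LOOKUP("core_clk", gsbi1_uart_clk.c, "msm_serial.0"),
 *      };
 *
 *      static struct clock_init_data msm9999_clock_init_data __initdata = {
 *              .table = msm_clocks,
 *              .size = ARRAY_SIZE(msm_clocks),
 *      };
 *
 *      msm_clock_init(&msm9999_clock_init_data);
 */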

/*
 * The bootloader and/or AMSS may have left various clocks enabled.
 * Disable any clocks that have not been explicitly enabled by a
 * clk_enable() call and don't have the CLKFLAG_SKIP_AUTO_OFF flag.
 */
static int __init clock_late_init(void)
{
        unsigned n, count = 0;
        unsigned long flags;
        int ret = 0;

        clock_debug_init(clk_init_data);
        for (n = 0; n < clk_init_data->size; n++) {
                struct clk *clk = clk_init_data->table[n].clk;
                bool handoff = false;

                clock_debug_add(clk);
                spin_lock_irqsave(&clk->lock, flags);
                if (!(clk->flags & CLKFLAG_SKIP_AUTO_OFF)) {
                        if (!clk->count && clk->ops->auto_off) {
                                count++;
                                clk->ops->auto_off(clk);
                        }
                }
                if (clk->flags & CLKFLAG_HANDOFF_RATE) {
                        clk->flags &= ~CLKFLAG_HANDOFF_RATE;
                        handoff = true;
                }
                spin_unlock_irqrestore(&clk->lock, flags);
                /*
                 * Calling this outside the lock is safe since
                 * it doesn't need to be atomic with the flag change.
                 */
                if (handoff)
                        clk_disable_unprepare(clk);
        }
        pr_info("clock_late_init() disabled %d unused clocks\n", count);
        if (clk_init_data->late_init)
                ret = clk_init_data->late_init();
        return ret;
}
late_initcall(clock_late_init);