/* arch/arm/mach-msm/clock.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <trace/events/power.h>

#include "clock.h"

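/*
 * Voltage (vdd) voting: a clock may reference a shared clk_vdd_class.
 * clk->fmax[] gives the maximum rate supported at each voltage level, and
 * level_votes[] counts how many clocks currently require each level. The
 * helpers below translate a requested rate into a vote for a level and keep
 * the supply at the highest level that still has an outstanding vote.
 */
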
/* Find the voltage level required for a given rate. */
static int find_vdd_level(struct clk *clk, unsigned long rate)
{
	int level;

	for (level = 0; level < ARRAY_SIZE(clk->fmax); level++)
		if (rate <= clk->fmax[level])
			break;

	if (level == ARRAY_SIZE(clk->fmax)) {
		pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
			clk->dbg_name);
		return -EINVAL;
	}

	return level;
}

/*
 * Update the voltage level given the current votes: pick the highest level
 * with a non-zero vote count (falling back to level 0 when nothing is
 * voted) and program it if it differs from the current level.
 */
static int update_vdd(struct clk_vdd_class *vdd_class)
{
	int level, rc;

	for (level = ARRAY_SIZE(vdd_class->level_votes)-1; level > 0; level--)
		if (vdd_class->level_votes[level])
			break;

	if (level == vdd_class->cur_level)
		return 0;

	rc = vdd_class->set_vdd(vdd_class, level);
	if (!rc)
		vdd_class->cur_level = level;

	return rc;
}

/* Vote for a voltage level. */
int vote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&vdd_class->lock, flags);
	vdd_class->level_votes[level]++;
	rc = update_vdd(vdd_class);
	if (rc)
		vdd_class->level_votes[level]--;
	spin_unlock_irqrestore(&vdd_class->lock, flags);

	return rc;
}

/* Remove vote for a voltage level. */
int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&vdd_class->lock, flags);
	if (WARN(!vdd_class->level_votes[level],
			"Reference counts are incorrect for %s level %d\n",
			vdd_class->class_name, level))
		goto out;
	vdd_class->level_votes[level]--;
	rc = update_vdd(vdd_class);
	if (rc)
		vdd_class->level_votes[level]++;
out:
	spin_unlock_irqrestore(&vdd_class->lock, flags);
	return rc;
}

/* Vote for a voltage level corresponding to a clock's rate. */
static int vote_rate_vdd(struct clk *clk, unsigned long rate)
{
	int level;

	if (!clk->vdd_class)
		return 0;

	level = find_vdd_level(clk, rate);
	if (level < 0)
		return level;

	return vote_vdd_level(clk->vdd_class, level);
}

/* Remove vote for a voltage level corresponding to a clock's rate. */
static void unvote_rate_vdd(struct clk *clk, unsigned long rate)
{
	int level;

	if (!clk->vdd_class)
		return;

	level = find_vdd_level(clk, rate);
	if (level < 0)
		return;

	unvote_vdd_level(clk->vdd_class, level);
}

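/*
 * clk_prepare()/clk_unprepare() take a mutex and may sleep, so they must be
 * called from process context; clk_enable()/clk_disable() take a spinlock
 * and may be called from atomic context. A client prepares a clock once up
 * front and then enables/disables it as needed.
 */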
int clk_prepare(struct clk *clk)
{
	int ret = 0;
	struct clk *parent;

	if (!clk)
		return 0;

	mutex_lock(&clk->prepare_lock);
	if (clk->prepare_count == 0) {
		parent = clk_get_parent(clk);

		ret = clk_prepare(parent);
		if (ret)
			goto out;
		ret = clk_prepare(clk->depends);
		if (ret)
			goto err_prepare_depends;

		if (clk->ops->prepare)
			ret = clk->ops->prepare(clk);
		if (ret)
			goto err_prepare_clock;
	}
	clk->prepare_count++;
out:
	mutex_unlock(&clk->prepare_lock);
	return ret;
err_prepare_clock:
	clk_unprepare(clk->depends);
err_prepare_depends:
	clk_unprepare(parent);
	goto out;
}
EXPORT_SYMBOL(clk_prepare);

/*
 * Standard clock functions defined in include/linux/clk.h
 */
int clk_enable(struct clk *clk)
{
	int ret = 0;
	unsigned long flags;
	struct clk *parent;

	if (!clk)
		return 0;

	spin_lock_irqsave(&clk->lock, flags);
	if (WARN(!clk->warned && !clk->prepare_count,
			"%s: Don't call enable on unprepared clocks\n",
			clk->dbg_name))
		clk->warned = true;
	if (clk->count == 0) {
		parent = clk_get_parent(clk);

		ret = clk_enable(parent);
		if (ret)
			goto err_enable_parent;
		ret = clk_enable(clk->depends);
		if (ret)
			goto err_enable_depends;

		ret = vote_rate_vdd(clk, clk->rate);
		if (ret)
			goto err_vote_vdd;
		trace_clock_enable(clk->dbg_name, 1, smp_processor_id());
		if (clk->ops->enable)
			ret = clk->ops->enable(clk);
		if (ret)
			goto err_enable_clock;
	} else if (clk->flags & CLKFLAG_HANDOFF_RATE) {
		/*
		 * The clock was already enabled by handoff code so there is no
		 * need to enable it again here. Clearing the handoff flag will
		 * prevent the lateinit handoff code from disabling the clock
		 * if a client driver still has it enabled.
		 */
		clk->flags &= ~CLKFLAG_HANDOFF_RATE;
		goto out;
	}
	clk->count++;
out:
	spin_unlock_irqrestore(&clk->lock, flags);

	return 0;

err_enable_clock:
	unvote_rate_vdd(clk, clk->rate);
err_vote_vdd:
	clk_disable(clk->depends);
err_enable_depends:
	clk_disable(parent);
err_enable_parent:
	spin_unlock_irqrestore(&clk->lock, flags);
	return ret;
}
EXPORT_SYMBOL(clk_enable);

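/*
 * Typical driver usage (a sketch, not code from this file; "core_clk" is a
 * hypothetical clock name):
 *
 *	struct clk *c = clk_get(dev, "core_clk");
 *	if (!IS_ERR(c) && !clk_prepare(c)) {
 *		clk_enable(c);
 *		...
 *		clk_disable(c);
 *		clk_unprepare(c);
 *	}
 *
 * The enable/disable pair may run with interrupts disabled; the
 * prepare/unprepare pair may not.
 */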
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clk->lock, flags);
	if (WARN(!clk->warned && !clk->prepare_count,
			"%s: Never called prepare or calling disable after unprepare\n",
			clk->dbg_name))
		clk->warned = true;
	if (WARN(clk->count == 0, "%s is unbalanced", clk->dbg_name))
		goto out;
	if (clk->count == 1) {
		struct clk *parent = clk_get_parent(clk);

		trace_clock_disable(clk->dbg_name, 0, smp_processor_id());
		if (clk->ops->disable)
			clk->ops->disable(clk);
		unvote_rate_vdd(clk, clk->rate);
		clk_disable(clk->depends);
		clk_disable(parent);
	}
	clk->count--;
out:
	spin_unlock_irqrestore(&clk->lock, flags);
}
EXPORT_SYMBOL(clk_disable);

void clk_unprepare(struct clk *clk)
{
	if (!clk)
		return;

	mutex_lock(&clk->prepare_lock);
	if (!clk->prepare_count) {
		if (WARN(!clk->warned, "%s is unbalanced (prepare)",
				clk->dbg_name))
			clk->warned = true;
		goto out;
	}
	if (clk->prepare_count == 1) {
		struct clk *parent = clk_get_parent(clk);

		if (WARN(!clk->warned && clk->count,
				"%s: Don't call unprepare when the clock is enabled\n",
				clk->dbg_name))
			clk->warned = true;

		if (clk->ops->unprepare)
			clk->ops->unprepare(clk);
		clk_unprepare(clk->depends);
		clk_unprepare(parent);
	}
	clk->prepare_count--;
out:
	mutex_unlock(&clk->prepare_lock);
}
EXPORT_SYMBOL(clk_unprepare);

int clk_reset(struct clk *clk, enum clk_reset_action action)
{
	if (!clk->ops->reset)
		return -ENOSYS;

	return clk->ops->reset(clk, action);
}
EXPORT_SYMBOL(clk_reset);

unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk->ops->get_rate)
		return clk->rate;

	return clk->ops->get_rate(clk);
}
EXPORT_SYMBOL(clk_get_rate);

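/*
 * Note the ordering when the clock is running: the vote for the target
 * rate's voltage level is taken before ops->set_rate() and the old vote is
 * only dropped afterwards, so the supply never dips below what either the
 * old or the new frequency requires during the switch.
 */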
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long start_rate, flags;
	int rc;

	if (!clk->ops->set_rate)
		return -ENOSYS;

	spin_lock_irqsave(&clk->lock, flags);
	trace_clock_set_rate(clk->dbg_name, rate, smp_processor_id());
	if (clk->count) {
		start_rate = clk->rate;
		/* Enforce vdd requirements for target frequency. */
		rc = vote_rate_vdd(clk, rate);
		if (rc)
			goto err_vote_vdd;
		rc = clk->ops->set_rate(clk, rate);
		if (rc)
			goto err_set_rate;
		/* Release vdd requirements for starting frequency. */
		unvote_rate_vdd(clk, start_rate);
	} else {
		rc = clk->ops->set_rate(clk, rate);
	}

	if (!rc)
		clk->rate = rate;

	spin_unlock_irqrestore(&clk->lock, flags);
	return rc;

err_set_rate:
	unvote_rate_vdd(clk, rate);
err_vote_vdd:
	spin_unlock_irqrestore(&clk->lock, flags);
	return rc;
}
EXPORT_SYMBOL(clk_set_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (!clk->ops->round_rate)
		return -ENOSYS;

	return clk->ops->round_rate(clk, rate);
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (!clk->ops->set_max_rate)
		return -ENOSYS;

	return clk->ops->set_max_rate(clk, rate);
}
EXPORT_SYMBOL(clk_set_max_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	if (!clk->ops->set_parent)
		return 0;

	return clk->ops->set_parent(clk, parent);
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	if (!clk->ops->get_parent)
		return NULL;

	return clk->ops->get_parent(clk);
}
EXPORT_SYMBOL(clk_get_parent);

int clk_set_flags(struct clk *clk, unsigned long flags)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;
	if (!clk->ops->set_flags)
		return -ENOSYS;

	return clk->ops->set_flags(clk, flags);
}
EXPORT_SYMBOL(clk_set_flags);

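/*
 * Board code passes a clock_init_data table into msm_clock_init(), which
 * links each clock onto its parent's children list, hands off clocks the
 * bootloader left running (taking a reference so they stay enabled),
 * registers the table with clkdev, and calls the optional
 * pre_init/post_init hooks around that work.
 */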
static struct clock_init_data __initdata *clk_init_data;

void __init msm_clock_init(struct clock_init_data *data)
{
	unsigned n;
	struct clk_lookup *clock_tbl;
	size_t num_clocks;

	clk_init_data = data;
	if (clk_init_data->pre_init)
		clk_init_data->pre_init();

	clock_tbl = data->table;
	num_clocks = data->size;

	for (n = 0; n < num_clocks; n++) {
		struct clk *clk = clock_tbl[n].clk;
		struct clk *parent = clk_get_parent(clk);
		if (parent && list_empty(&clk->siblings))
			list_add(&clk->siblings, &parent->children);
		if (clk->ops->handoff && !(clk->flags & CLKFLAG_HANDOFF_RATE)) {
			if (clk->ops->handoff(clk) == HANDOFF_ENABLED_CLK) {
				clk->flags |= CLKFLAG_HANDOFF_RATE;
				clk_prepare_enable(clk);
			}
		}
	}

	clkdev_add_table(clock_tbl, num_clocks);

	if (clk_init_data->post_init)
		clk_init_data->post_init();
}

/*
 * The bootloader and/or AMSS may have left various clocks enabled.
 * Disable any clocks that have not been explicitly enabled by a
 * clk_enable() call and don't have the CLKFLAG_SKIP_AUTO_OFF flag.
 * Also release the reference taken at handoff time for clocks that
 * no client driver has claimed since.
 */
static int __init clock_late_init(void)
{
	unsigned n, count = 0;
	unsigned long flags;
	int ret = 0;

	clock_debug_init(clk_init_data);
	for (n = 0; n < clk_init_data->size; n++) {
		struct clk *clk = clk_init_data->table[n].clk;
		bool handoff = false;

		clock_debug_add(clk);
		spin_lock_irqsave(&clk->lock, flags);
		if (!(clk->flags & CLKFLAG_SKIP_AUTO_OFF)) {
			if (!clk->count && clk->ops->auto_off) {
				count++;
				clk->ops->auto_off(clk);
			}
		}
		if (clk->flags & CLKFLAG_HANDOFF_RATE) {
			clk->flags &= ~CLKFLAG_HANDOFF_RATE;
			handoff = true;
		}
		spin_unlock_irqrestore(&clk->lock, flags);
		/*
		 * Calling this outside the lock is safe since
		 * it doesn't need to be atomic with the flag change.
		 */
		if (handoff)
			clk_disable_unprepare(clk);
	}
	pr_info("clock_late_init() disabled %d unused clocks\n", count);
	if (clk_init_data->late_init)
		ret = clk_init_data->late_init();
	return ret;
}
late_initcall(clock_late_init);