/* arch/arm/mach-msm/clock.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/clkdev.h>

#include "clock.h"

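/*
 * Voltage (vdd) voting overview: each clock may point at a shared
 * clk_vdd_class, and clk->fmax[] gives the maximum rate supported at
 * each voltage level. vote_rate_vdd()/unvote_rate_vdd() translate a
 * rate into a level with find_vdd_level() and adjust the per-level
 * reference counts; update_vdd() then applies the highest level with
 * a non-zero vote through the class's set_vdd() callback.
 */
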
/* Find the voltage level required for a given rate. */
static int find_vdd_level(struct clk *clk, unsigned long rate)
{
	int level;

	for (level = 0; level < ARRAY_SIZE(clk->fmax); level++)
		if (rate <= clk->fmax[level])
			break;

	if (level == ARRAY_SIZE(clk->fmax)) {
		pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
			clk->dbg_name);
		return -EINVAL;
	}

	return level;
}

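/*
 * Example walk-through with a hypothetical table: if clk->fmax holds
 * { 100 MHz, 200 MHz, 300 MHz }, a request for 150 MHz fails the
 * level-0 test (150 MHz > 100 MHz) and stops at level 1
 * (150 MHz <= 200 MHz), so find_vdd_level() returns 1. A request above
 * 300 MHz runs off the end of the table and returns -EINVAL.
 */
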
/* Update voltage level given the current votes. */
static int update_vdd(struct clk_vdd_class *vdd_class)
{
	int level, rc;

	for (level = ARRAY_SIZE(vdd_class->level_votes)-1; level > 0; level--)
		if (vdd_class->level_votes[level])
			break;

	if (level == vdd_class->cur_level)
		return 0;

	rc = vdd_class->set_vdd(vdd_class, level);
	if (!rc)
		vdd_class->cur_level = level;

	return rc;
}

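/*
 * Note on the scan above: it runs from the highest level down and stops
 * at level 1, so level 0 is what the class falls back to when no votes
 * are outstanding. set_vdd() is only called when the winning level
 * actually changes.
 */
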
/* Vote for a voltage level. */
int vote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&vdd_class->lock, flags);
	vdd_class->level_votes[level]++;
	rc = update_vdd(vdd_class);
	if (rc)
		vdd_class->level_votes[level]--;
	spin_unlock_irqrestore(&vdd_class->lock, flags);

	return rc;
}

/* Remove vote for a voltage level. */
int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&vdd_class->lock, flags);
	if (WARN(!vdd_class->level_votes[level],
			"Reference counts are incorrect for %s level %d\n",
			vdd_class->class_name, level))
		goto out;
	vdd_class->level_votes[level]--;
	rc = update_vdd(vdd_class);
	if (rc)
		vdd_class->level_votes[level]++;
out:
	spin_unlock_irqrestore(&vdd_class->lock, flags);
	return rc;
}

/* Vote for a voltage level corresponding to a clock's rate. */
static int vote_rate_vdd(struct clk *clk, unsigned long rate)
{
	int level;

	if (!clk->vdd_class)
		return 0;

	level = find_vdd_level(clk, rate);
	if (level < 0)
		return level;

	return vote_vdd_level(clk->vdd_class, level);
}

/* Remove vote for a voltage level corresponding to a clock's rate. */
static void unvote_rate_vdd(struct clk *clk, unsigned long rate)
{
	int level;

	if (!clk->vdd_class)
		return;

	level = find_vdd_level(clk, rate);
	if (level < 0)
		return;

	unvote_vdd_level(clk->vdd_class, level);
}

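/*
 * The two helpers above are meant to be called in matched pairs with the
 * same rate: note how clk_set_rate() below votes for the new rate before
 * dropping the vote for the old one, so the rail never dips below what
 * either rate requires during the transition.
 */
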
/*
 * Standard clock functions defined in include/linux/clk.h
 */
int clk_enable(struct clk *clk)
{
	int ret = 0;
	unsigned long flags;
	struct clk *parent;

	if (!clk)
		return 0;

	spin_lock_irqsave(&clk->lock, flags);
	if (clk->count == 0) {
		parent = clk_get_parent(clk);

		ret = clk_enable(parent);
		if (ret)
			goto err_enable_parent;
		ret = clk_enable(clk->depends);
		if (ret)
			goto err_enable_depends;

		ret = vote_rate_vdd(clk, clk->rate);
		if (ret)
			goto err_vote_vdd;
		if (clk->ops->enable)
			ret = clk->ops->enable(clk);
		if (ret)
			goto err_enable_clock;
	} else if (clk->flags & CLKFLAG_HANDOFF_RATE) {
		/*
		 * The clock was already enabled by handoff code so there is no
		 * need to enable it again here. Clearing the handoff flag will
		 * prevent the lateinit handoff code from disabling the clock if
		 * a client driver still has it enabled.
		 */
		clk->flags &= ~CLKFLAG_HANDOFF_RATE;
		goto out;
	}
	clk->count++;
out:
	spin_unlock_irqrestore(&clk->lock, flags);

	return 0;

err_enable_clock:
	unvote_rate_vdd(clk, clk->rate);
err_vote_vdd:
	clk_disable(clk->depends);
err_enable_depends:
	clk_disable(parent);
err_enable_parent:
	spin_unlock_irqrestore(&clk->lock, flags);
	return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clk->lock, flags);
	if (WARN(clk->count == 0, "%s is unbalanced", clk->dbg_name))
		goto out;
	if (clk->count == 1) {
		struct clk *parent = clk_get_parent(clk);

		if (clk->ops->disable)
			clk->ops->disable(clk);
		unvote_rate_vdd(clk, clk->rate);
		clk_disable(clk->depends);
		clk_disable(parent);
	}
	clk->count--;
out:
	spin_unlock_irqrestore(&clk->lock, flags);
}
EXPORT_SYMBOL(clk_disable);

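/*
 * Typical consumer usage (hypothetical driver code; "core_clk" and the
 * rate are made-up values for illustration):
 *
 *	struct clk *c = clk_get(dev, "core_clk");
 *	if (!IS_ERR(c)) {
 *		clk_set_rate(c, 19200000);
 *		ret = clk_enable(c);
 *	}
 *	...
 *	clk_disable(c);
 *
 * Enable/disable calls nest: the hardware is only touched on the first
 * clk_enable() and on the clk_disable() that balances it.
 */
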
int clk_reset(struct clk *clk, enum clk_reset_action action)
{
	if (!clk->ops->reset)
		return -ENOSYS;

	return clk->ops->reset(clk, action);
}
EXPORT_SYMBOL(clk_reset);

unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk->ops->get_rate)
		return clk->rate;

	return clk->ops->get_rate(clk);
}
EXPORT_SYMBOL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long start_rate, flags;
	int rc;

	if (!clk->ops->set_rate)
		return -ENOSYS;

	spin_lock_irqsave(&clk->lock, flags);
	if (clk->count) {
		start_rate = clk->rate;
		/* Enforce vdd requirements for target frequency. */
		rc = vote_rate_vdd(clk, rate);
		if (rc)
			goto err_vote_vdd;
		rc = clk->ops->set_rate(clk, rate);
		if (rc)
			goto err_set_rate;
		/* Release vdd requirements for starting frequency. */
		unvote_rate_vdd(clk, start_rate);
	} else {
		rc = clk->ops->set_rate(clk, rate);
	}

	if (!rc)
		clk->rate = rate;

	spin_unlock_irqrestore(&clk->lock, flags);
	return rc;

err_set_rate:
	unvote_rate_vdd(clk, rate);
err_vote_vdd:
	spin_unlock_irqrestore(&clk->lock, flags);
	return rc;
}
EXPORT_SYMBOL(clk_set_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (!clk->ops->round_rate)
		return -ENOSYS;

	return clk->ops->round_rate(clk, rate);
}
EXPORT_SYMBOL(clk_round_rate);

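/*
 * Sketch of the usual pairing (hypothetical rate value): round first so
 * the rate passed to clk_set_rate() is one the clock can actually run at.
 *
 *	long r = clk_round_rate(c, 48000000);
 *	if (r > 0)
 *		rc = clk_set_rate(c, r);
 */
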
int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (!clk->ops->set_max_rate)
		return -ENOSYS;

	return clk->ops->set_max_rate(clk, rate);
}
EXPORT_SYMBOL(clk_set_max_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	if (!clk->ops->set_parent)
		return 0;

	return clk->ops->set_parent(clk, parent);
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	if (!clk->ops->get_parent)
		return NULL;

	return clk->ops->get_parent(clk);
}
EXPORT_SYMBOL(clk_get_parent);

int clk_set_flags(struct clk *clk, unsigned long flags)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;
	if (!clk->ops->set_flags)
		return -ENOSYS;

	return clk->ops->set_flags(clk, flags);
}
EXPORT_SYMBOL(clk_set_flags);

static struct clock_init_data __initdata *clk_init_data;

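/*
 * Handoff, in brief: clocks the bootloader left running are detected by
 * their handoff() op in msm_clock_init() below, flagged with
 * CLKFLAG_HANDOFF_RATE, and given an extra enable so they stay on. If a
 * driver later calls clk_enable(), the flag is cleared there and the
 * driver's reference adopts the handoff one; otherwise clock_late_init()
 * clears the flag and drops the extra enable.
 */
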
void __init msm_clock_init(struct clock_init_data *data)
{
	unsigned n;
	struct clk_lookup *clock_tbl;
	size_t num_clocks;

	clk_init_data = data;
	if (clk_init_data->init)
		clk_init_data->init();

	clock_tbl = data->table;
	num_clocks = data->size;

	for (n = 0; n < num_clocks; n++) {
		struct clk *clk = clock_tbl[n].clk;
		struct clk *parent = clk_get_parent(clk);
		clk_set_parent(clk, parent);
		if (clk->ops->handoff && !(clk->flags & CLKFLAG_HANDOFF_RATE)) {
			if (clk->ops->handoff(clk)) {
				clk->flags |= CLKFLAG_HANDOFF_RATE;
				clk_enable(clk);
			}
		}
	}

	clkdev_add_table(clock_tbl, num_clocks);
}

/*
 * The bootloader and/or AMSS may have left various clocks enabled.
 * Disable any clocks that have not been explicitly enabled by a
 * clk_enable() call and don't have the CLKFLAG_SKIP_AUTO_OFF flag.
 */
static int __init clock_late_init(void)
{
	unsigned n, count = 0;
	unsigned long flags;
	int ret = 0;

	clock_debug_init(clk_init_data);
	for (n = 0; n < clk_init_data->size; n++) {
		struct clk *clk = clk_init_data->table[n].clk;
		bool handoff = false;

		clock_debug_add(clk);
		if (!(clk->flags & CLKFLAG_SKIP_AUTO_OFF)) {
			spin_lock_irqsave(&clk->lock, flags);
			if (!clk->count && clk->ops->auto_off) {
				count++;
				clk->ops->auto_off(clk);
			}
			if (clk->flags & CLKFLAG_HANDOFF_RATE) {
				clk->flags &= ~CLKFLAG_HANDOFF_RATE;
				handoff = true;
			}
			spin_unlock_irqrestore(&clk->lock, flags);
			/*
			 * Calling clk_disable() outside the lock is safe since
			 * it doesn't need to be atomic with the flag change.
			 */
			if (handoff)
				clk_disable(clk);
		}
	}
	pr_info("clock_late_init() disabled %d unused clocks\n", count);
	if (clk_init_data->late_init)
		ret = clk_init_data->late_init();
	return ret;
}
late_initcall(clock_late_init);