/* arch/arm/mach-msm/clock.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/list.h>
#include <linux/regulator/consumer.h>
#include <trace/events/power.h>
#include <mach/clk-provider.h>
#include "clock.h"

struct handoff_clk {
	struct list_head list;
	struct clk *clk;
};
static LIST_HEAD(handoff_list);

struct handoff_vdd {
	struct list_head list;
	struct clk_vdd_class *vdd_class;
};
static LIST_HEAD(handoff_vdd_list);

/* Find the voltage level required for a given rate. */
int find_vdd_level(struct clk *clk, unsigned long rate)
{
	int level;

	for (level = 0; level < clk->num_fmax; level++)
		if (rate <= clk->fmax[level])
			break;

	if (level == clk->num_fmax) {
		pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
			clk->dbg_name);
		return -EINVAL;
	}

	return level;
}

/* Update voltage level given the current votes. */
static int update_vdd(struct clk_vdd_class *vdd_class)
{
	int level, rc = 0, i;
	struct regulator **r = vdd_class->regulator;
	int *uv = vdd_class->vdd_uv;
	int *ua = vdd_class->vdd_ua;
	int n_reg = vdd_class->num_regulators;
	int max_lvl = vdd_class->num_levels - 1;
	int lvl_base;

	for (level = max_lvl; level > 0; level--)
		if (vdd_class->level_votes[level])
			break;

	if (level == vdd_class->cur_level)
		return 0;

	max_lvl = max_lvl * n_reg;
	lvl_base = level * n_reg;
	for (i = 0; i < vdd_class->num_regulators; i++) {
		rc = regulator_set_voltage(r[i], uv[lvl_base + i],
					   uv[max_lvl + i]);
		if (rc)
			goto set_voltage_fail;

		if (!ua)
			continue;

		rc = regulator_set_optimum_mode(r[i], ua[lvl_base + i]);
		if (rc < 0)
			goto set_mode_fail;
	}
	if (vdd_class->set_vdd && !vdd_class->num_regulators)
		rc = vdd_class->set_vdd(vdd_class, level);

	if (rc < 0)
		goto set_voltage_fail;

	vdd_class->cur_level = level;

	return 0;

set_mode_fail:
	regulator_set_voltage(r[i], uv[vdd_class->cur_level * n_reg + i],
			      uv[max_lvl + i]);

set_voltage_fail:
	lvl_base = vdd_class->cur_level * n_reg;
	for (i--; i >= 0; i--) {
		regulator_set_voltage(r[i], uv[lvl_base + i], uv[max_lvl + i]);

		if (!ua)
			continue;
		regulator_set_optimum_mode(r[i], ua[lvl_base + i]);
	}

	return rc;
}

/* Vote for a voltage level. */
int vote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
	int rc;

	if (level >= vdd_class->num_levels)
		return -EINVAL;

	mutex_lock(&vdd_class->lock);
	vdd_class->level_votes[level]++;
	rc = update_vdd(vdd_class);
	if (rc)
		vdd_class->level_votes[level]--;
	mutex_unlock(&vdd_class->lock);

	return rc;
}

/* Remove vote for a voltage level. */
int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
	int rc = 0;

	if (level >= vdd_class->num_levels)
		return -EINVAL;

	mutex_lock(&vdd_class->lock);
	if (WARN(!vdd_class->level_votes[level],
			"Reference counts are incorrect for %s level %d\n",
			vdd_class->class_name, level))
		goto out;
	vdd_class->level_votes[level]--;
	rc = update_vdd(vdd_class);
	if (rc)
		vdd_class->level_votes[level]++;
out:
	mutex_unlock(&vdd_class->lock);
	return rc;
}

/* Vote for a voltage level corresponding to a clock's rate. */
static int vote_rate_vdd(struct clk *clk, unsigned long rate)
{
	int level;

	if (!clk->vdd_class)
		return 0;

	level = find_vdd_level(clk, rate);
	if (level < 0)
		return level;

	return vote_vdd_level(clk->vdd_class, level);
}

/* Remove vote for a voltage level corresponding to a clock's rate. */
static void unvote_rate_vdd(struct clk *clk, unsigned long rate)
{
	int level;

	if (!clk->vdd_class)
		return;

	level = find_vdd_level(clk, rate);
	if (level < 0)
		return;

	unvote_vdd_level(clk->vdd_class, level);
}

/* Check if the rate is within the voltage limits of the clock. */
static bool is_rate_valid(struct clk *clk, unsigned long rate)
{
	int level;

	if (!clk->vdd_class)
		return true;

	level = find_vdd_level(clk, rate);
	return level >= 0;
}

/**
 * __clk_pre_reparent() - Set up the new parent before switching to it and
 * prevent the enable state of the child clock from changing.
 * @c: The child clock that's going to switch parents
 * @new: The new parent that the child clock is going to switch to
 * @flags: Pointer to scratch space to save spinlock flags
 *
 * Cannot be called from atomic context.
 *
 * Use this API to set up the @new parent clock to be able to support the
 * current prepare and enable state of the child clock @c. Once the parent is
 * set up, the child clock can safely switch to it.
 *
 * The caller shall grab the prepare_lock of clock @c before calling this API
 * and only release it after calling __clk_post_reparent() for clock @c (or
 * if this API fails). This is necessary to prevent the prepare state of the
 * child clock @c from changing while the reparenting is in progress. Since
 * this API takes care of grabbing the enable lock of @c, only atomic
 * operations are allowed between calls to __clk_pre_reparent() and
 * __clk_post_reparent().
 *
 * The scratch space pointed to by @flags should not be altered before
 * calling __clk_post_reparent() for clock @c.
 *
 * See also: __clk_post_reparent()
 */
int __clk_pre_reparent(struct clk *c, struct clk *new, unsigned long *flags)
{
	int rc;

	if (c->prepare_count) {
		rc = clk_prepare(new);
		if (rc)
			return rc;
	}

	spin_lock_irqsave(&c->lock, *flags);
	if (c->count) {
		rc = clk_enable(new);
		if (rc) {
			spin_unlock_irqrestore(&c->lock, *flags);
			clk_unprepare(new);
			return rc;
		}
	}
	return 0;
}

/**
 * __clk_post_reparent() - Release requirements on old parent after switching
 * away from it and allow changes to the child clock's enable state.
 * @c: The child clock that switched parents
 * @old: The old parent that the child clock switched away from or the new
 * parent of a failed reparent attempt.
 * @flags: Pointer to scratch space where spinlock flags were saved
 *
 * Cannot be called from atomic context.
 *
 * This API works in tandem with __clk_pre_reparent(). Use this API to
 * - Remove prepare and enable requirements from the @old parent after
 *   switching away from it
 * - Or, undo the effects of __clk_pre_reparent() after a failed attempt to
 *   change parents
 *
 * The caller shall release the prepare_lock of @c that was grabbed before
 * calling __clk_pre_reparent() only after this API is called (or if
 * __clk_pre_reparent() fails). This is necessary to prevent the prepare
 * state of the child clock @c from changing while the reparenting is in
 * progress. Since this API releases the enable lock of @c, the limit to
 * atomic operations set by __clk_pre_reparent() is no longer present.
 *
 * The scratch space pointed to by @flags shall not be altered since the call
 * to __clk_pre_reparent() for clock @c.
 *
 * See also: __clk_pre_reparent()
 */
void __clk_post_reparent(struct clk *c, struct clk *old, unsigned long *flags)
{
	if (c->count)
		clk_disable(old);
	spin_unlock_irqrestore(&c->lock, *flags);

	if (c->prepare_count)
		clk_unprepare(old);
}
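/*
 * Illustrative sketch (not part of the original driver): how a clock
 * implementation that switches between two parent sources from its
 * set_rate() op might use the reparent helpers above. The type
 * "struct example_mux_clk" and the helpers to_example_mux_clk(),
 * __example_pick_source() and __example_switch_source() are hypothetical
 * and exist only to show the call sequence; the locking protocol is the
 * one documented in the kernel-doc comments for __clk_pre_reparent() and
 * __clk_post_reparent().
 *
 *	static int example_mux_set_rate(struct clk *c, unsigned long rate)
 *	{
 *		struct example_mux_clk *mux = to_example_mux_clk(c);
 *		struct clk *old_parent = c->parent;
 *		struct clk *new_parent = __example_pick_source(mux, rate);
 *		unsigned long flags;
 *		int rc;
 *
 *		// clk_set_rate() already holds c->prepare_lock here.
 *		rc = __clk_pre_reparent(c, new_parent, &flags);
 *		if (rc)
 *			return rc;
 *
 *		// Only atomic operations (e.g. the register write that
 *		// actually switches the mux) are allowed until the matching
 *		// __clk_post_reparent() call releases c->lock.
 *		rc = __example_switch_source(mux, new_parent);
 *		if (rc) {
 *			// Failure: drop the votes taken on the new parent.
 *			__clk_post_reparent(c, new_parent, &flags);
 *			return rc;
 *		}
 *
 *		c->parent = new_parent;
 *		// Success: drop the votes held on the old parent.
 *		__clk_post_reparent(c, old_parent, &flags);
 *		return 0;
 *	}
 */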

int clk_prepare(struct clk *clk)
{
	int ret = 0;
	struct clk *parent;

	if (!clk)
		return 0;
	if (IS_ERR(clk))
		return -EINVAL;

	mutex_lock(&clk->prepare_lock);
	if (clk->prepare_count == 0) {
		parent = clk->parent;

		ret = clk_prepare(parent);
		if (ret)
			goto out;
		ret = clk_prepare(clk->depends);
		if (ret)
			goto err_prepare_depends;

		ret = vote_rate_vdd(clk, clk->rate);
		if (ret)
			goto err_vote_vdd;
		if (clk->ops->prepare)
			ret = clk->ops->prepare(clk);
		if (ret)
			goto err_prepare_clock;
	}
	clk->prepare_count++;
out:
	mutex_unlock(&clk->prepare_lock);
	return ret;
err_prepare_clock:
	unvote_rate_vdd(clk, clk->rate);
err_vote_vdd:
	clk_unprepare(clk->depends);
err_prepare_depends:
	clk_unprepare(parent);
	goto out;
}
EXPORT_SYMBOL(clk_prepare);

/*
 * Standard clock functions defined in include/linux/clk.h
 */
int clk_enable(struct clk *clk)
{
	int ret = 0;
	unsigned long flags;
	struct clk *parent;
	const char *name = clk ? clk->dbg_name : NULL;

	if (!clk)
		return 0;
	if (IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clk->lock, flags);
	WARN(!clk->prepare_count,
			"%s: Don't call enable on unprepared clocks\n", name);
	if (clk->count == 0) {
		parent = clk->parent;

		ret = clk_enable(parent);
		if (ret)
			goto err_enable_parent;
		ret = clk_enable(clk->depends);
		if (ret)
			goto err_enable_depends;

		trace_clock_enable(name, 1, smp_processor_id());
		if (clk->ops->enable)
			ret = clk->ops->enable(clk);
		if (ret)
			goto err_enable_clock;
	}
	clk->count++;
	spin_unlock_irqrestore(&clk->lock, flags);

	return 0;

err_enable_clock:
	clk_disable(clk->depends);
err_enable_depends:
	clk_disable(parent);
err_enable_parent:
	spin_unlock_irqrestore(&clk->lock, flags);
	return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	const char *name = clk ? clk->dbg_name : NULL;
	unsigned long flags;

	if (IS_ERR_OR_NULL(clk))
		return;

	spin_lock_irqsave(&clk->lock, flags);
	WARN(!clk->prepare_count,
			"%s: Never called prepare or calling disable after unprepare\n",
			name);
	if (WARN(clk->count == 0, "%s is unbalanced", name))
		goto out;
	if (clk->count == 1) {
		struct clk *parent = clk->parent;

		trace_clock_disable(name, 0, smp_processor_id());
		if (clk->ops->disable)
			clk->ops->disable(clk);
		clk_disable(clk->depends);
		clk_disable(parent);
	}
	clk->count--;
out:
	spin_unlock_irqrestore(&clk->lock, flags);
}
EXPORT_SYMBOL(clk_disable);

void clk_unprepare(struct clk *clk)
{
	const char *name = clk ? clk->dbg_name : NULL;

	if (IS_ERR_OR_NULL(clk))
		return;

	mutex_lock(&clk->prepare_lock);
	if (WARN(!clk->prepare_count, "%s is unbalanced (prepare)", name))
		goto out;
	if (clk->prepare_count == 1) {
		struct clk *parent = clk->parent;

		WARN(clk->count,
			"%s: Don't call unprepare when the clock is enabled\n",
			name);

		if (clk->ops->unprepare)
			clk->ops->unprepare(clk);
		unvote_rate_vdd(clk, clk->rate);
		clk_unprepare(clk->depends);
		clk_unprepare(parent);
	}
	clk->prepare_count--;
out:
	mutex_unlock(&clk->prepare_lock);
}
EXPORT_SYMBOL(clk_unprepare);

int clk_reset(struct clk *clk, enum clk_reset_action action)
{
	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;

	if (!clk->ops->reset)
		return -ENOSYS;

	return clk->ops->reset(clk, action);
}
EXPORT_SYMBOL(clk_reset);

unsigned long clk_get_rate(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return 0;

	if (!clk->ops->get_rate)
		return clk->rate;

	return clk->ops->get_rate(clk);
}
EXPORT_SYMBOL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long start_rate;
	int rc = 0;
	const char *name = clk ? clk->dbg_name : NULL;

	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;

	if (!clk->ops->set_rate)
		return -ENOSYS;

	if (!is_rate_valid(clk, rate))
		return -EINVAL;

	mutex_lock(&clk->prepare_lock);

	/* Return early if the rate isn't going to change */
	if (clk->rate == rate)
		goto out;

	trace_clock_set_rate(name, rate, raw_smp_processor_id());

	start_rate = clk->rate;

	/* Enforce vdd requirements for target frequency. */
	if (clk->prepare_count) {
		rc = vote_rate_vdd(clk, rate);
		if (rc)
			goto out;
	}

	rc = clk->ops->set_rate(clk, rate);
	if (rc)
		goto err_set_rate;
	clk->rate = rate;

	/* Release vdd requirements for starting frequency. */
	if (clk->prepare_count)
		unvote_rate_vdd(clk, start_rate);

out:
	mutex_unlock(&clk->prepare_lock);
	return rc;

err_set_rate:
	if (clk->prepare_count)
		unvote_rate_vdd(clk, rate);
	goto out;
}
EXPORT_SYMBOL(clk_set_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;

	if (!clk->ops->round_rate)
		return -ENOSYS;

	return clk->ops->round_rate(clk, rate);
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;

	if (!clk->ops->set_max_rate)
		return -ENOSYS;

	return clk->ops->set_max_rate(clk, rate);
}
EXPORT_SYMBOL(clk_set_max_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int rc = 0;

	if (!clk->ops->set_parent)
		return -ENOSYS;

	mutex_lock(&clk->prepare_lock);
	if (clk->parent == parent)
		goto out;
	rc = clk->ops->set_parent(clk, parent);
	if (!rc)
		clk->parent = parent;
out:
	mutex_unlock(&clk->prepare_lock);

	return rc;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return NULL;

	return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);

int clk_set_flags(struct clk *clk, unsigned long flags)
{
	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;
	if (!clk->ops->set_flags)
		return -ENOSYS;

	return clk->ops->set_flags(clk, flags);
}
EXPORT_SYMBOL(clk_set_flags);

static struct clock_init_data *clk_init_data;

static void init_sibling_lists(struct clk_lookup *clock_tbl, size_t num_clocks)
{
	struct clk *clk, *parent;
	unsigned n;

	for (n = 0; n < num_clocks; n++) {
		clk = clock_tbl[n].clk;
		parent = clk->parent;
		if (parent && list_empty(&clk->siblings))
			list_add(&clk->siblings, &parent->children);
	}
}

/**
 * msm_clock_register() - Register additional clock tables
 * @table: Table of clocks
 * @size: Size of @table
 *
 * Upon return, clock APIs may be used to control clocks registered using this
 * function. This API may only be used after msm_clock_init() has completed.
 * Unlike msm_clock_init(), this function may be called multiple times with
 * different clock lists and used after the kernel has finished booting.
 */
int msm_clock_register(struct clk_lookup *table, size_t size)
{
	if (!clk_init_data)
		return -ENODEV;

	if (!table)
		return -EINVAL;

	init_sibling_lists(table, size);
	clkdev_add_table(table, size);
	clock_debug_register(table, size);

	return 0;
}
EXPORT_SYMBOL(msm_clock_register);
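/*
 * Illustrative sketch (not part of the original driver): registering an
 * extra clock table from a device driver after msm_clock_init() has run.
 * The table contents, the "example_clk" clock and the probe function below
 * are hypothetical and only show the calling convention.
 *
 *	static struct clk_lookup example_clocks[] = {
 *		{
 *			.con_id = "core_clk",
 *			.dev_id = "example-device.0",
 *			.clk = &example_clk,
 *		},
 *	};
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		int rc;
 *
 *		rc = msm_clock_register(example_clocks,
 *					ARRAY_SIZE(example_clocks));
 *		if (rc)
 *			return rc;
 *
 *		// Consumers can now do clk_get(&pdev->dev, "core_clk").
 *		return 0;
 *	}
 */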

static void vdd_class_init(struct clk_vdd_class *vdd)
{
	struct handoff_vdd *v;
	int i;

	if (!vdd)
		return;

	list_for_each_entry(v, &handoff_vdd_list, list) {
		if (v->vdd_class == vdd)
			return;
	}

	pr_debug("voting for vdd_class %s\n", vdd->class_name);
	if (vote_vdd_level(vdd, vdd->num_levels - 1))
		pr_err("failed to vote for %s\n", vdd->class_name);

	for (i = 0; i < vdd->num_regulators; i++)
		regulator_enable(vdd->regulator[i]);

	v = kmalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		pr_err("Unable to kmalloc. %s will be stuck at max.\n",
			vdd->class_name);
		return;
	}

	v->vdd_class = vdd;
	list_add_tail(&v->list, &handoff_vdd_list);
}

static int __init __handoff_clk(struct clk *clk)
{
	enum handoff state = HANDOFF_DISABLED_CLK;
	struct handoff_clk *h = NULL;
	int rc;

	if (clk == NULL || clk->flags & CLKFLAG_INIT_DONE ||
	    clk->flags & CLKFLAG_SKIP_HANDOFF)
		return 0;

	if (clk->flags & CLKFLAG_INIT_ERR)
		return -ENXIO;

	/* Handoff any 'depends' clock first. */
	rc = __handoff_clk(clk->depends);
	if (rc)
		goto err;

	/*
	 * Handoff functions for the parent must be called before the
	 * children can be handed off. Without handing off the parents and
	 * knowing their rate and state (on/off), it's impossible to figure
	 * out the rate and state of the children.
	 */
	if (clk->ops->get_parent)
		clk->parent = clk->ops->get_parent(clk);

	if (IS_ERR(clk->parent)) {
		rc = PTR_ERR(clk->parent);
		goto err;
	}

	rc = __handoff_clk(clk->parent);
	if (rc)
		goto err;

	if (clk->ops->handoff)
		state = clk->ops->handoff(clk);

	if (state == HANDOFF_ENABLED_CLK) {

		h = kmalloc(sizeof(*h), GFP_KERNEL);
		if (!h) {
			rc = -ENOMEM;
			goto err;
		}

		rc = clk_prepare_enable(clk->parent);
		if (rc)
			goto err;

		rc = clk_prepare_enable(clk->depends);
		if (rc)
			goto err_depends;

		rc = vote_rate_vdd(clk, clk->rate);
		WARN(rc, "%s unable to vote for voltage!\n", clk->dbg_name);

		clk->count = 1;
		clk->prepare_count = 1;
		h->clk = clk;
		list_add_tail(&h->list, &handoff_list);

		pr_debug("Handed off %s rate=%lu\n", clk->dbg_name, clk->rate);
	}

	clk->flags |= CLKFLAG_INIT_DONE;

	return 0;

err_depends:
	clk_disable_unprepare(clk->parent);
err:
	kfree(h);
	clk->flags |= CLKFLAG_INIT_ERR;
	pr_err("%s handoff failed (%d)\n", clk->dbg_name, rc);
	return rc;
}

/**
 * msm_clock_init() - Register and initialize a clock driver
 * @data: Driver-specific clock initialization data
 *
 * Upon return from this call, clock APIs may be used to control
 * clocks registered with this API.
 */
int __init msm_clock_init(struct clock_init_data *data)
{
	unsigned n;
	struct clk_lookup *clock_tbl;
	size_t num_clocks;

	if (!data)
		return -EINVAL;

	clk_init_data = data;
	if (clk_init_data->pre_init)
		clk_init_data->pre_init();

	clock_tbl = data->table;
	num_clocks = data->size;

	init_sibling_lists(clock_tbl, num_clocks);

	/*
	 * Enable regulators and temporarily set them up at maximum voltage.
	 * Once all the clocks have made their respective vote, remove this
	 * temporary vote. The temporary vote is removed at late_init, by
	 * which time we assume all the clocks will have been handed off.
	 */
	for (n = 0; n < num_clocks; n++)
		vdd_class_init(clock_tbl[n].clk->vdd_class);

	/*
	 * Detect and preserve initial clock state until clock_late_init() or
	 * a driver explicitly changes it, whichever is first.
	 */
	for (n = 0; n < num_clocks; n++)
		__handoff_clk(clock_tbl[n].clk);

	clkdev_add_table(clock_tbl, num_clocks);

	if (clk_init_data->post_init)
		clk_init_data->post_init();

	clock_debug_init();
	clock_debug_register(clock_tbl, num_clocks);

	return 0;
}
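/*
 * Illustrative sketch (not part of the original driver): how a SoC clock
 * driver would typically hand its clock table to msm_clock_init() from the
 * machine init code. The names example_clocks, example_pre_init,
 * example_post_init, example_late_init and example_machine_init are
 * hypothetical; the clock_init_data fields are the ones used above.
 *
 *	static struct clock_init_data example_clock_init_data = {
 *		.table = example_clocks,
 *		.size = ARRAY_SIZE(example_clocks),
 *		.pre_init = example_pre_init,
 *		.post_init = example_post_init,
 *		.late_init = example_late_init,
 *	};
 *
 *	static void __init example_machine_init(void)
 *	{
 *		msm_clock_init(&example_clock_init_data);
 *	}
 */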

static int __init clock_late_init(void)
{
	struct handoff_clk *h, *h_temp;
	struct handoff_vdd *v, *v_temp;
	int ret = 0;

	if (clk_init_data->late_init)
		ret = clk_init_data->late_init();

	pr_info("%s: Removing enables held for handed-off clocks\n", __func__);
	list_for_each_entry_safe(h, h_temp, &handoff_list, list) {
		clk_disable_unprepare(h->clk);
		list_del(&h->list);
		kfree(h);
	}

	list_for_each_entry_safe(v, v_temp, &handoff_vdd_list, list) {
		unvote_vdd_level(v->vdd_class, v->vdd_class->num_levels - 1);
		list_del(&v->list);
		kfree(v);
	}

	return ret;
}
late_initcall(clock_late_init);