blob: d21a7c87cf1490d15754894398c96a34888cf462 [file] [log] [blame]
Stephen Boyda6835112012-01-26 14:40:05 -08001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/err.h>
15#include <mach/clk.h>
16
17#include "rpm_resources.h"
18#include "clock.h"
19#include "clock-rpm.h"
20
/* Serializes RPM vote requests and the cached per-clock rate/enable state. */
static DEFINE_SPINLOCK(rpm_clock_lock);
22
23static int rpm_clk_enable(struct clk *clk)
24{
25 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070026 struct rpm_clk *r = to_rpm_clk(clk);
Stephen Boyda6835112012-01-26 14:40:05 -080027 struct msm_rpm_iv_pair iv = { .id = r->rpm_clk_id };
28 int rc = 0;
Matt Wagantall9de3bfb2011-11-03 20:13:12 -070029 unsigned long this_khz, this_sleep_khz;
30 unsigned long peer_khz = 0, peer_sleep_khz = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070031 struct rpm_clk *peer = r->peer;
32
33 spin_lock_irqsave(&rpm_clock_lock, flags);
34
35 this_khz = r->last_set_khz;
36 /* Don't send requests to the RPM if the rate has not been set. */
37 if (this_khz == 0)
38 goto out;
39
40 this_sleep_khz = r->last_set_sleep_khz;
41
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070042 /* Take peer clock's rate into account only if it's enabled. */
43 if (peer->enabled) {
44 peer_khz = peer->last_set_khz;
45 peer_sleep_khz = peer->last_set_sleep_khz;
46 }
47
48 iv.value = max(this_khz, peer_khz);
Stephen Boyda6835112012-01-26 14:40:05 -080049 if (r->branch)
50 iv.value = !!iv.value;
51
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070052 rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
53 if (rc)
54 goto out;
55
56 iv.value = max(this_sleep_khz, peer_sleep_khz);
Stephen Boyda6835112012-01-26 14:40:05 -080057 if (r->branch)
58 iv.value = !!iv.value;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070059 rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_SLEEP, &iv, 1);
Matt Wagantall735f01a2011-08-12 12:40:28 -070060 if (rc) {
61 iv.value = peer_khz;
62 msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
63 }
64
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070065out:
66 if (!rc)
67 r->enabled = true;
68
69 spin_unlock_irqrestore(&rpm_clock_lock, flags);
70
71 return rc;
72}
73
74static void rpm_clk_disable(struct clk *clk)
75{
76 unsigned long flags;
77 struct rpm_clk *r = to_rpm_clk(clk);
78
79 spin_lock_irqsave(&rpm_clock_lock, flags);
80
81 if (r->last_set_khz) {
Stephen Boyda6835112012-01-26 14:40:05 -080082 struct msm_rpm_iv_pair iv = { .id = r->rpm_clk_id };
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070083 struct rpm_clk *peer = r->peer;
Matt Wagantall9de3bfb2011-11-03 20:13:12 -070084 unsigned long peer_khz = 0, peer_sleep_khz = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070085 int rc;
86
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070087 /* Take peer clock's rate into account only if it's enabled. */
88 if (peer->enabled) {
89 peer_khz = peer->last_set_khz;
90 peer_sleep_khz = peer->last_set_sleep_khz;
91 }
92
Stephen Boyda6835112012-01-26 14:40:05 -080093 iv.value = r->branch ? !!peer_khz : peer_khz;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070094 rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
95 if (rc)
96 goto out;
97
Stephen Boyda6835112012-01-26 14:40:05 -080098 iv.value = r->branch ? !!peer_sleep_khz : peer_sleep_khz;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070099 rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_SLEEP, &iv, 1);
100 }
101 r->enabled = false;
102out:
103 spin_unlock_irqrestore(&rpm_clock_lock, flags);
104
105 return;
106}
107
Matt Wagantall77952c42011-11-08 18:45:48 -0800108static int rpm_clk_set_rate(struct clk *clk, unsigned long rate)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700109{
110 unsigned long flags;
111 struct rpm_clk *r = to_rpm_clk(clk);
Matt Wagantall9de3bfb2011-11-03 20:13:12 -0700112 unsigned long this_khz, this_sleep_khz;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700113 int rc = 0;
114
115 this_khz = DIV_ROUND_UP(rate, 1000);
116
117 spin_lock_irqsave(&rpm_clock_lock, flags);
118
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700119 /* Active-only clocks don't care what the rate is during sleep. So,
120 * they vote for zero. */
121 if (r->active_only)
122 this_sleep_khz = 0;
123 else
124 this_sleep_khz = this_khz;
125
126 if (r->enabled) {
127 struct msm_rpm_iv_pair iv;
128 struct rpm_clk *peer = r->peer;
Matt Wagantall9de3bfb2011-11-03 20:13:12 -0700129 unsigned long peer_khz = 0, peer_sleep_khz = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700130
131 iv.id = r->rpm_clk_id;
132
133 /* Take peer clock's rate into account only if it's enabled. */
134 if (peer->enabled) {
135 peer_khz = peer->last_set_khz;
136 peer_sleep_khz = peer->last_set_sleep_khz;
137 }
138
139 iv.value = max(this_khz, peer_khz);
140 rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
141 if (rc)
142 goto out;
143
144 iv.value = max(this_sleep_khz, peer_sleep_khz);
145 rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_SLEEP, &iv, 1);
146 }
147 if (!rc) {
148 r->last_set_khz = this_khz;
149 r->last_set_sleep_khz = this_sleep_khz;
150 }
151
152out:
153 spin_unlock_irqrestore(&rpm_clock_lock, flags);
154
155 return rc;
156}
157
Matt Wagantall9de3bfb2011-11-03 20:13:12 -0700158static unsigned long rpm_clk_get_rate(struct clk *clk)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700159{
160 struct rpm_clk *r = to_rpm_clk(clk);
161 struct msm_rpm_iv_pair iv = { r->rpm_status_id };
162 int rc;
163
164 rc = msm_rpm_get_status(&iv, 1);
165 if (rc < 0)
166 return rc;
167 return iv.value * 1000;
168}
169
/* An RPM clock is considered enabled whenever it reports a nonzero rate. */
static int rpm_clk_is_enabled(struct clk *clk)
{
	return rpm_clk_get_rate(clk) != 0;
}
174
Matt Wagantall9de3bfb2011-11-03 20:13:12 -0700175static long rpm_clk_round_rate(struct clk *clk, unsigned long rate)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700176{
177 /* Not supported. */
178 return rate;
179}
180
181static bool rpm_clk_is_local(struct clk *clk)
182{
183 return false;
184}
185
Matt Wagantall39c94ef2012-05-16 13:24:41 -0700186static enum handoff rpm_clk_handoff(struct clk *clk)
187{
188 struct rpm_clk *r = to_rpm_clk(clk);
189 struct msm_rpm_iv_pair iv = { r->rpm_status_id };
190 int rc;
191
192 /*
193 * Querying an RPM clock's status will return 0 unless the clock's
194 * rate has previously been set through the RPM. When handing off,
195 * assume these clocks are enabled (unless the RPM call fails) so
196 * child clocks of these RPM clocks can still be handed off.
197 */
198 rc = msm_rpm_get_status(&iv, 1);
199 if (rc < 0)
200 return HANDOFF_DISABLED_CLK;
201
202 r->last_set_khz = iv.value;
203 r->last_set_sleep_khz = iv.value;
204 clk->rate = iv.value * 1000;
205
206 return HANDOFF_ENABLED_CLK;
207}
208
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700209struct clk_ops clk_ops_rpm = {
210 .enable = rpm_clk_enable,
211 .disable = rpm_clk_disable,
Matt Wagantall77952c42011-11-08 18:45:48 -0800212 .set_rate = rpm_clk_set_rate,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700213 .get_rate = rpm_clk_get_rate,
214 .is_enabled = rpm_clk_is_enabled,
215 .round_rate = rpm_clk_round_rate,
216 .is_local = rpm_clk_is_local,
Matt Wagantall39c94ef2012-05-16 13:24:41 -0700217 .handoff = rpm_clk_handoff,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700218};
Stephen Boyda6835112012-01-26 14:40:05 -0800219
/*
 * Ops for RPM branch clocks: on/off only (votes collapse to 0/1 via
 * r->branch in the shared enable/disable paths), so no rate operations.
 */
struct clk_ops clk_ops_rpm_branch = {
	.enable = rpm_clk_enable,
	.disable = rpm_clk_disable,
	.is_local = rpm_clk_is_local,
	.handoff = rpm_clk_handoff,
};