/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <mach/clk.h>

#include "rpm_resources.h"
#include "clock.h"
#include "clock-rpm.h"

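/*
 * Wrappers around the per-target set_rate callback. Each request targets
 * either the active-set or the sleep-set context; the _noirq variants are
 * the ones used below, since requests are issued while holding
 * rpm_clock_lock with interrupts disabled.
 */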
#define __clk_rpmrs_set_rate(r, value, ctx, noirq) \
	((r)->rpmrs_data->set_rate_fn((r), (value), (ctx), (noirq)))

#define clk_rpmrs_set_rate_sleep(r, value) \
	__clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_sleep_id, 0)

#define clk_rpmrs_set_rate_sleep_noirq(r, value) \
	__clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_sleep_id, 1)

#define clk_rpmrs_set_rate_active(r, value) \
	__clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_active_id, 0)

#define clk_rpmrs_set_rate_active_noirq(r, value) \
	__clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_active_id, 1)

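/*
 * Native RPM resource interface: a request is an id/value pair sent with
 * msm_rpmrs_set()/msm_rpmrs_set_noirq(), and the current rate is read back
 * from the clock's rpm_status_id via msm_rpm_get_status().
 */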
static int clk_rpmrs_set_rate(struct rpm_clk *r, uint32_t value,
			uint32_t context, int noirq)
{
	struct msm_rpm_iv_pair iv = {
		.id = r->rpm_clk_id,
		.value = value,
	};
	if (noirq)
		return msm_rpmrs_set_noirq(context, &iv, 1);
	else
		return msm_rpmrs_set(context, &iv, 1);
}

static int clk_rpmrs_get_rate(struct rpm_clk *r)
{
	int rc;
	struct msm_rpm_iv_pair iv = { .id = r->rpm_status_id, };
	rc = msm_rpm_get_status(&iv, 1);
	return (rc < 0) ? rc : iv.value * r->factor;
}

static int clk_rpmrs_handoff(struct rpm_clk *r)
{
	struct msm_rpm_iv_pair iv = { .id = r->rpm_status_id, };
	int rc = msm_rpm_get_status(&iv, 1);

	if (rc < 0)
		return rc;

	if (!r->branch) {
		r->last_set_khz = iv.value;
		if (!r->active_only)
			r->last_set_sleep_khz = iv.value;
		r->c.rate = iv.value * r->factor;
	}

	return 0;
}

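/*
 * SMD-based RPM interface: a request is a key/value pair addressed by
 * resource type and clock id and sent with msm_rpm_send_message() or its
 * _noirq variant. There is no status query on this path, so handoff simply
 * reports success and leaves the cached rates untouched.
 */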
static int clk_rpmrs_set_rate_smd(struct rpm_clk *r, uint32_t value,
			uint32_t context, int noirq)
{
	struct msm_rpm_kvp kvp = {
		.key = r->rpm_key,
		.data = (void *)&value,
		.length = sizeof(value),
	};

	if (noirq)
		return msm_rpm_send_message_noirq(context,
				r->rpm_res_type, r->rpm_clk_id, &kvp, 1);
	else
		return msm_rpm_send_message(context, r->rpm_res_type,
				r->rpm_clk_id, &kvp, 1);
}

static int clk_rpmrs_handoff_smd(struct rpm_clk *r)
{
	return 0;
}

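/*
 * Per-target description of the RPM interface: the set_rate/get_rate/handoff
 * callbacks plus the context ids for the active and sleep sets. Each rpm_clk
 * references one of the two instances below through its rpmrs_data pointer.
 */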
struct clk_rpmrs_data {
	int (*set_rate_fn)(struct rpm_clk *r, uint32_t value,
				uint32_t context, int noirq);
	int (*get_rate_fn)(struct rpm_clk *r);
	int (*handoff_fn)(struct rpm_clk *r);
	int ctx_active_id;
	int ctx_sleep_id;
};

struct clk_rpmrs_data clk_rpmrs_data = {
	.set_rate_fn = clk_rpmrs_set_rate,
	.get_rate_fn = clk_rpmrs_get_rate,
	.handoff_fn = clk_rpmrs_handoff,
	.ctx_active_id = MSM_RPM_CTX_SET_0,
	.ctx_sleep_id = MSM_RPM_CTX_SET_SLEEP,
};

struct clk_rpmrs_data clk_rpmrs_data_smd = {
	.set_rate_fn = clk_rpmrs_set_rate_smd,
	.handoff_fn = clk_rpmrs_handoff_smd,
	.ctx_active_id = MSM_RPM_CTX_ACTIVE_SET,
	.ctx_sleep_id = MSM_RPM_CTX_SLEEP_SET,
};

static DEFINE_SPINLOCK(rpm_clock_lock);

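/*
 * Each RPM clock is voted on by a pair of handles, 'r' and its 'peer'
 * (normally the active-only counterpart of the same RPM resource). Enabling
 * a handle sends max(this_khz, peer_khz) to the active set and the analogous
 * aggregate to the sleep set; branch clocks are collapsed to a 1/0 vote.
 * A failed sleep-set request rolls the active-set vote back to peer_khz.
 */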
static int rpm_clk_enable(struct clk *clk)
{
	unsigned long flags;
	struct rpm_clk *r = to_rpm_clk(clk);
	uint32_t value;
	int rc = 0;
	unsigned long this_khz, this_sleep_khz;
	unsigned long peer_khz = 0, peer_sleep_khz = 0;
	struct rpm_clk *peer = r->peer;

	spin_lock_irqsave(&rpm_clock_lock, flags);

	this_khz = r->last_set_khz;
	/* Don't send requests to the RPM if the rate has not been set. */
	if (this_khz == 0)
		goto out;

	this_sleep_khz = r->last_set_sleep_khz;

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled) {
		peer_khz = peer->last_set_khz;
		peer_sleep_khz = peer->last_set_sleep_khz;
	}

	value = max(this_khz, peer_khz);
	if (r->branch)
		value = !!value;

	rc = clk_rpmrs_set_rate_active_noirq(r, value);
	if (rc)
		goto out;

	value = max(this_sleep_khz, peer_sleep_khz);
	if (r->branch)
		value = !!value;

	rc = clk_rpmrs_set_rate_sleep_noirq(r, value);
	if (rc) {
		/* Undo the active set vote and restore it to peer_khz */
		value = peer_khz;
		rc = clk_rpmrs_set_rate_active_noirq(r, value);
	}

out:
	if (!rc)
		r->enabled = true;

	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return rc;
}

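/*
 * Disabling removes this handle's contribution from both sets: the votes
 * fall back to whatever the peer still requests (or zero if the peer is
 * disabled). Nothing is sent to the RPM if a rate was never set.
 */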
static void rpm_clk_disable(struct clk *clk)
{
	unsigned long flags;
	struct rpm_clk *r = to_rpm_clk(clk);

	spin_lock_irqsave(&rpm_clock_lock, flags);

	if (r->last_set_khz) {
		uint32_t value;
		struct rpm_clk *peer = r->peer;
		unsigned long peer_khz = 0, peer_sleep_khz = 0;
		int rc;

		/* Take peer clock's rate into account only if it's enabled. */
		if (peer->enabled) {
			peer_khz = peer->last_set_khz;
			peer_sleep_khz = peer->last_set_sleep_khz;
		}

		value = r->branch ? !!peer_khz : peer_khz;
		rc = clk_rpmrs_set_rate_active_noirq(r, value);
		if (rc)
			goto out;

		value = r->branch ? !!peer_sleep_khz : peer_sleep_khz;
		rc = clk_rpmrs_set_rate_sleep_noirq(r, value);
	}
	r->enabled = false;
out:
	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return;
}

static int rpm_clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	struct rpm_clk *r = to_rpm_clk(clk);
	unsigned long this_khz, this_sleep_khz;
	int rc = 0;

	this_khz = DIV_ROUND_UP(rate, r->factor);

	spin_lock_irqsave(&rpm_clock_lock, flags);

	/* Active-only clocks don't care what the rate is during sleep. So,
	 * they vote for zero. */
	if (r->active_only)
		this_sleep_khz = 0;
	else
		this_sleep_khz = this_khz;

	if (r->enabled) {
		uint32_t value;
		struct rpm_clk *peer = r->peer;
		unsigned long peer_khz = 0, peer_sleep_khz = 0;

		/* Take peer clock's rate into account only if it's enabled. */
		if (peer->enabled) {
			peer_khz = peer->last_set_khz;
			peer_sleep_khz = peer->last_set_sleep_khz;
		}

		value = max(this_khz, peer_khz);
		rc = clk_rpmrs_set_rate_active_noirq(r, value);
		if (rc)
			goto out;

		value = max(this_sleep_khz, peer_sleep_khz);
		rc = clk_rpmrs_set_rate_sleep_noirq(r, value);
	}
	if (!rc) {
		r->last_set_khz = this_khz;
		r->last_set_sleep_khz = this_sleep_khz;
	}

out:
	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return rc;
}

static unsigned long rpm_clk_get_rate(struct clk *clk)
{
	struct rpm_clk *r = to_rpm_clk(clk);
	if (r->rpmrs_data->get_rate_fn)
		return r->rpmrs_data->get_rate_fn(r);
	else
		return clk->rate;
}

static int rpm_clk_is_enabled(struct clk *clk)
{
	return !!(rpm_clk_get_rate(clk));
}

static long rpm_clk_round_rate(struct clk *clk, unsigned long rate)
{
	/* Not supported. */
	return rate;
}

static bool rpm_clk_is_local(struct clk *clk)
{
	return false;
}

static enum handoff rpm_clk_handoff(struct clk *clk)
{
	struct rpm_clk *r = to_rpm_clk(clk);
	int rc;

	/*
	 * Querying an RPM clock's status will return 0 unless the clock's
	 * rate has previously been set through the RPM. When handing off,
	 * assume these clocks are enabled (unless the RPM call fails) so
	 * child clocks of these RPM clocks can still be handed off.
	 */
	rc = r->rpmrs_data->handoff_fn(r);
	if (rc < 0)
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

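/*
 * Operations for rate-settable RPM clocks. Rounding is not supported, so
 * round_rate() returns the requested rate unchanged.
 */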
struct clk_ops clk_ops_rpm = {
	.enable = rpm_clk_enable,
	.disable = rpm_clk_disable,
	.set_rate = rpm_clk_set_rate,
	.get_rate = rpm_clk_get_rate,
	.is_enabled = rpm_clk_is_enabled,
	.round_rate = rpm_clk_round_rate,
	.is_local = rpm_clk_is_local,
	.handoff = rpm_clk_handoff,
};

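/*
 * Branch RPM clocks only support on/off voting, so the rate operations are
 * omitted; enable/disable reduce the aggregated vote to 1 or 0 through the
 * r->branch checks above.
 */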
struct clk_ops clk_ops_rpm_branch = {
	.enable = rpm_clk_enable,
	.disable = rpm_clk_disable,
	.is_local = rpm_clk_is_local,
	.handoff = rpm_clk_handoff,
};