/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <mach/clk-provider.h>

#include "rpm_resources.h"
#include "clock-rpm.h"

#define __clk_rpmrs_set_rate(r, value, ctx, noirq) \
        ((r)->rpmrs_data->set_rate_fn((r), (value), (ctx), (noirq)))

#define clk_rpmrs_set_rate_sleep(r, value) \
        __clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_sleep_id, 0)

#define clk_rpmrs_set_rate_sleep_noirq(r, value) \
        __clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_sleep_id, 1)

#define clk_rpmrs_set_rate_active(r, value) \
        __clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_active_id, 0)

#define clk_rpmrs_set_rate_active_noirq(r, value) \
        __clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_active_id, 1)

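/*
 * Vote for a clock rate with the resource-set (rpmrs) RPM interface by
 * sending an id/value pair for the given context (active or sleep set).
 */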
static int clk_rpmrs_set_rate(struct rpm_clk *r, uint32_t value,
                              uint32_t context, int noirq)
{
        struct msm_rpm_iv_pair iv = {
                .id = r->rpm_clk_id,
                .value = value,
        };
        if (noirq)
                return msm_rpmrs_set_noirq(context, &iv, 1);
        else
                return msm_rpmrs_set(context, &iv, 1);
}

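/* Read the clock's current value from the RPM and scale it by the clock's factor. */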
static int clk_rpmrs_get_rate(struct rpm_clk *r)
{
        int rc;
        struct msm_rpm_iv_pair iv = { .id = r->rpm_status_id, };
        rc = msm_rpm_get_status(&iv, 1);
        return (rc < 0) ? rc : iv.value * r->factor;
}

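/*
 * Seed last_set_khz, last_set_sleep_khz and the cached clock rate from the
 * value the RPM currently holds for this clock.
 */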
static int clk_rpmrs_handoff(struct rpm_clk *r)
{
        struct msm_rpm_iv_pair iv = { .id = r->rpm_status_id, };
        int rc = msm_rpm_get_status(&iv, 1);

        if (rc < 0)
                return rc;

        if (!r->branch) {
                r->last_set_khz = iv.value;
                if (!r->active_only)
                        r->last_set_sleep_khz = iv.value;
                r->c.rate = iv.value * r->factor;
        }

        return 0;
}

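/*
 * Vote for a clock rate over the SMD-based RPM interface by sending a
 * key/value pair against the clock's resource type and id.
 */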
static int clk_rpmrs_set_rate_smd(struct rpm_clk *r, uint32_t value,
                                  uint32_t context, int noirq)
{
        struct msm_rpm_kvp kvp = {
                .key = r->rpm_key,
                .data = (void *)&value,
                .length = sizeof(value),
        };

        if (noirq)
                return msm_rpm_send_message_noirq(context,
                                r->rpm_res_type, r->rpm_clk_id, &kvp, 1);
        else
                return msm_rpm_send_message(context, r->rpm_res_type,
                                r->rpm_clk_id, &kvp, 1);
}

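/* No RPM state is read back for the SMD interface; handoff simply succeeds. */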
static int clk_rpmrs_handoff_smd(struct rpm_clk *r)
{
        return 0;
}

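/*
 * Per-interface callbacks and context ids, so the clock code can be shared
 * between the resource-set (rpmrs) and SMD flavors of the RPM driver.
 */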
struct clk_rpmrs_data {
        int (*set_rate_fn)(struct rpm_clk *r, uint32_t value,
                                uint32_t context, int noirq);
        int (*get_rate_fn)(struct rpm_clk *r);
        int (*handoff_fn)(struct rpm_clk *r);
        int ctx_active_id;
        int ctx_sleep_id;
};

struct clk_rpmrs_data clk_rpmrs_data = {
        .set_rate_fn = clk_rpmrs_set_rate,
        .get_rate_fn = clk_rpmrs_get_rate,
        .handoff_fn = clk_rpmrs_handoff,
        .ctx_active_id = MSM_RPM_CTX_SET_0,
        .ctx_sleep_id = MSM_RPM_CTX_SET_SLEEP,
};

struct clk_rpmrs_data clk_rpmrs_data_smd = {
        .set_rate_fn = clk_rpmrs_set_rate_smd,
        .handoff_fn = clk_rpmrs_handoff_smd,
        .ctx_active_id = MSM_RPM_CTX_ACTIVE_SET,
        .ctx_sleep_id = MSM_RPM_CTX_SLEEP_SET,
};

static DEFINE_SPINLOCK(rpm_clock_lock);

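/*
 * Enabling an RPM clock re-sends the active-set and sleep-set votes,
 * aggregated with the peer clock's votes, using the rate cached by the
 * last set_rate call.
 */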
static int rpm_clk_enable(struct clk *clk)
{
        unsigned long flags;
        struct rpm_clk *r = to_rpm_clk(clk);
        uint32_t value;
        int rc = 0;
        unsigned long this_khz, this_sleep_khz;
        unsigned long peer_khz = 0, peer_sleep_khz = 0;
        struct rpm_clk *peer = r->peer;

        spin_lock_irqsave(&rpm_clock_lock, flags);

        this_khz = r->last_set_khz;
        /* Don't send requests to the RPM if the rate has not been set. */
        if (this_khz == 0)
                goto out;

        this_sleep_khz = r->last_set_sleep_khz;

        /* Take peer clock's rate into account only if it's enabled. */
        if (peer->enabled) {
                peer_khz = peer->last_set_khz;
                peer_sleep_khz = peer->last_set_sleep_khz;
        }

        value = max(this_khz, peer_khz);
        if (r->branch)
                value = !!value;

        rc = clk_rpmrs_set_rate_active_noirq(r, value);
        if (rc)
                goto out;

        value = max(this_sleep_khz, peer_sleep_khz);
        if (r->branch)
                value = !!value;

        rc = clk_rpmrs_set_rate_sleep_noirq(r, value);
        if (rc) {
                /* Undo the active set vote and restore it to peer_khz. */
                value = peer_khz;
                rc = clk_rpmrs_set_rate_active_noirq(r, value);
        }

out:
        if (!rc)
                r->enabled = true;

        spin_unlock_irqrestore(&rpm_clock_lock, flags);

        return rc;
}

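/*
 * Disabling drops this clock's contribution: only the peer clock's votes
 * (if it is enabled) are re-sent to the RPM.
 */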
static void rpm_clk_disable(struct clk *clk)
{
        unsigned long flags;
        struct rpm_clk *r = to_rpm_clk(clk);

        spin_lock_irqsave(&rpm_clock_lock, flags);

        if (r->last_set_khz) {
                uint32_t value;
                struct rpm_clk *peer = r->peer;
                unsigned long peer_khz = 0, peer_sleep_khz = 0;
                int rc;

                /* Take peer clock's rate into account only if it's enabled. */
                if (peer->enabled) {
                        peer_khz = peer->last_set_khz;
                        peer_sleep_khz = peer->last_set_sleep_khz;
                }

                value = r->branch ? !!peer_khz : peer_khz;
                rc = clk_rpmrs_set_rate_active_noirq(r, value);
                if (rc)
                        goto out;

                value = r->branch ? !!peer_sleep_khz : peer_sleep_khz;
                rc = clk_rpmrs_set_rate_sleep_noirq(r, value);
        }
        r->enabled = false;
out:
        spin_unlock_irqrestore(&rpm_clock_lock, flags);

        return;
}

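/*
 * Convert the requested rate to the RPM's units with the clock's factor and,
 * if the clock is enabled, send updated active-set and sleep-set votes.
 * The new rate is cached on success so a later enable can use it.
 */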
static int rpm_clk_set_rate(struct clk *clk, unsigned long rate)
{
        unsigned long flags;
        struct rpm_clk *r = to_rpm_clk(clk);
        unsigned long this_khz, this_sleep_khz;
        int rc = 0;

        this_khz = DIV_ROUND_UP(rate, r->factor);

        spin_lock_irqsave(&rpm_clock_lock, flags);

        /*
         * Active-only clocks don't care what the rate is during sleep, so
         * they vote for zero.
         */
        if (r->active_only)
                this_sleep_khz = 0;
        else
                this_sleep_khz = this_khz;

        if (r->enabled) {
                uint32_t value;
                struct rpm_clk *peer = r->peer;
                unsigned long peer_khz = 0, peer_sleep_khz = 0;

                /* Take peer clock's rate into account only if it's enabled. */
                if (peer->enabled) {
                        peer_khz = peer->last_set_khz;
                        peer_sleep_khz = peer->last_set_sleep_khz;
                }

                value = max(this_khz, peer_khz);
                rc = clk_rpmrs_set_rate_active_noirq(r, value);
                if (rc)
                        goto out;

                value = max(this_sleep_khz, peer_sleep_khz);
                rc = clk_rpmrs_set_rate_sleep_noirq(r, value);
        }
        if (!rc) {
                r->last_set_khz = this_khz;
                r->last_set_sleep_khz = this_sleep_khz;
        }

out:
        spin_unlock_irqrestore(&rpm_clock_lock, flags);

        return rc;
}

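/*
 * Prefer the interface's status query when one exists (rpmrs); otherwise
 * fall back to the rate cached in the clock framework.
 */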
static unsigned long rpm_clk_get_rate(struct clk *clk)
{
        struct rpm_clk *r = to_rpm_clk(clk);
        if (r->rpmrs_data->get_rate_fn)
                return r->rpmrs_data->get_rate_fn(r);
        else
                return clk->rate;
}

static int rpm_clk_is_enabled(struct clk *clk)
{
        return !!(rpm_clk_get_rate(clk));
}

static long rpm_clk_round_rate(struct clk *clk, unsigned long rate)
{
        /* Not supported. */
        return rate;
}

static bool rpm_clk_is_local(struct clk *clk)
{
        return false;
}

static enum handoff rpm_clk_handoff(struct clk *clk)
{
        struct rpm_clk *r = to_rpm_clk(clk);
        int rc;

        /*
         * Querying an RPM clock's status will return 0 unless the clock's
         * rate has previously been set through the RPM. When handing off,
         * assume these clocks are enabled (unless the RPM call fails) so
         * child clocks of these RPM clocks can still be handed off.
         */
        rc = r->rpmrs_data->handoff_fn(r);
        if (rc < 0)
                return HANDOFF_DISABLED_CLK;

        return HANDOFF_ENABLED_CLK;
}

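/* Ops for rate-settable RPM clocks and for simple on/off RPM branch clocks. */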
struct clk_ops clk_ops_rpm = {
        .enable = rpm_clk_enable,
        .disable = rpm_clk_disable,
        .set_rate = rpm_clk_set_rate,
        .get_rate = rpm_clk_get_rate,
        .is_enabled = rpm_clk_is_enabled,
        .round_rate = rpm_clk_round_rate,
        .is_local = rpm_clk_is_local,
        .handoff = rpm_clk_handoff,
};

struct clk_ops clk_ops_rpm_branch = {
        .enable = rpm_clk_enable,
        .disable = rpm_clk_disable,
        .is_local = rpm_clk_is_local,
        .handoff = rpm_clk_handoff,
};