/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <mach/clk.h>

#include "rpm_resources.h"
#include "clock.h"
#include "clock-rpm.h"

static DEFINE_SPINLOCK(rpm_clock_lock);
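
/*
 * Vote for this clock to be on. Requests go to the RPM for both the active
 * context (SET_0) and the sleep context, aggregated with the peer clock's
 * last requested rate. Nothing is sent until a rate has been set via
 * rpm_clk_set_rate(); if the sleep-set request fails, the active-set vote
 * is rolled back to the peer's rate.
 */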
static int rpm_clk_enable(struct clk *clk)
{
	unsigned long flags;
	struct rpm_clk *r = to_rpm_clk(clk);
	struct msm_rpm_iv_pair iv = { .id = r->rpm_clk_id };
	int rc = 0;
	unsigned long this_khz, this_sleep_khz;
	unsigned long peer_khz = 0, peer_sleep_khz = 0;
	struct rpm_clk *peer = r->peer;

	spin_lock_irqsave(&rpm_clock_lock, flags);

	this_khz = r->last_set_khz;
	/* Don't send requests to the RPM if the rate has not been set. */
	if (this_khz == 0)
		goto out;

	this_sleep_khz = r->last_set_sleep_khz;

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled) {
		peer_khz = peer->last_set_khz;
		peer_sleep_khz = peer->last_set_sleep_khz;
	}

	iv.value = max(this_khz, peer_khz);
	if (r->branch)
		iv.value = !!iv.value;

	rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
	if (rc)
		goto out;

	iv.value = max(this_sleep_khz, peer_sleep_khz);
	if (r->branch)
		iv.value = !!iv.value;
	rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_SLEEP, &iv, 1);
	if (rc) {
		iv.value = peer_khz;
		msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
	}

out:
	if (!rc)
		r->enabled = true;

	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return rc;
}
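
/*
 * Drop this clock's vote. If a rate was previously requested, the active
 * and sleep sets are re-voted with only the peer clock's rate so the
 * peer's requirements stay satisfied; the clock is then marked disabled.
 */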
static void rpm_clk_disable(struct clk *clk)
{
	unsigned long flags;
	struct rpm_clk *r = to_rpm_clk(clk);

	spin_lock_irqsave(&rpm_clock_lock, flags);

	if (r->last_set_khz) {
		struct msm_rpm_iv_pair iv = { .id = r->rpm_clk_id };
		struct rpm_clk *peer = r->peer;
		unsigned long peer_khz = 0, peer_sleep_khz = 0;
		int rc;

		/* Take peer clock's rate into account only if it's enabled. */
		if (peer->enabled) {
			peer_khz = peer->last_set_khz;
			peer_sleep_khz = peer->last_set_sleep_khz;
		}

		iv.value = r->branch ? !!peer_khz : peer_khz;
		rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
		if (rc)
			goto out;

		iv.value = r->branch ? !!peer_sleep_khz : peer_sleep_khz;
		rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_SLEEP, &iv, 1);
	}
	r->enabled = false;
out:
	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return;
}
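
/*
 * Request a new rate. Rates are sent to the RPM in kHz; active-only clocks
 * vote 0 kHz for the sleep set. If the clock is currently enabled, the new
 * rate (aggregated with the peer's) is sent immediately; otherwise it is
 * only cached and applied on the next enable.
 */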
static int rpm_clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	struct rpm_clk *r = to_rpm_clk(clk);
	unsigned long this_khz, this_sleep_khz;
	int rc = 0;

	this_khz = DIV_ROUND_UP(rate, 1000);

	spin_lock_irqsave(&rpm_clock_lock, flags);

	/* Ignore duplicate requests. */
	if (r->last_set_khz == this_khz)
		goto out;

	/*
	 * Active-only clocks don't care what the rate is during sleep. So,
	 * they vote for zero.
	 */
	if (r->active_only)
		this_sleep_khz = 0;
	else
		this_sleep_khz = this_khz;

	if (r->enabled) {
		struct msm_rpm_iv_pair iv;
		struct rpm_clk *peer = r->peer;
		unsigned long peer_khz = 0, peer_sleep_khz = 0;

		iv.id = r->rpm_clk_id;

		/* Take peer clock's rate into account only if it's enabled. */
		if (peer->enabled) {
			peer_khz = peer->last_set_khz;
			peer_sleep_khz = peer->last_set_sleep_khz;
		}

		iv.value = max(this_khz, peer_khz);
		rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
		if (rc)
			goto out;

		iv.value = max(this_sleep_khz, peer_sleep_khz);
		rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_SLEEP, &iv, 1);
	}
	if (!rc) {
		r->last_set_khz = this_khz;
		r->last_set_sleep_khz = this_sleep_khz;
	}

out:
	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return rc;
}
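
/*
 * Query the RPM for the clock's current rate using its status ID and
 * convert the reported kHz value to Hz for the clock framework.
 */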
static unsigned long rpm_clk_get_rate(struct clk *clk)
{
	struct rpm_clk *r = to_rpm_clk(clk);
	struct msm_rpm_iv_pair iv = { r->rpm_status_id };
	int rc;

	rc = msm_rpm_get_status(&iv, 1);
	if (rc < 0)
		return rc;
	return iv.value * 1000;
}

static int rpm_clk_is_enabled(struct clk *clk)
{
	return !!(rpm_clk_get_rate(clk));
}

static long rpm_clk_round_rate(struct clk *clk, unsigned long rate)
{
	/* Not supported. */
	return rate;
}

static bool rpm_clk_is_local(struct clk *clk)
{
	return false;
}

static unsigned long rpm_branch_clk_get_rate(struct clk *clk)
{
	struct rpm_clk *r = to_rpm_clk(clk);
	return r->last_set_khz * 1000;
}
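
/*
 * Operations for rate-settable RPM clocks. Branch clocks use the reduced
 * clk_ops_rpm_branch table below, which omits set_rate and reports the
 * last locally set rate instead of querying the RPM.
 */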
struct clk_ops clk_ops_rpm = {
	.enable = rpm_clk_enable,
	.disable = rpm_clk_disable,
	.set_rate = rpm_clk_set_rate,
	.get_rate = rpm_clk_get_rate,
	.is_enabled = rpm_clk_is_enabled,
	.round_rate = rpm_clk_round_rate,
	.is_local = rpm_clk_is_local,
};

struct clk_ops clk_ops_rpm_branch = {
	.enable = rpm_clk_enable,
	.disable = rpm_clk_disable,
	.is_local = rpm_clk_is_local,
	.get_rate = rpm_branch_clk_get_rate,
};