/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <mach/clk.h>

#include "rpm_resources.h"
#include "clock.h"
#include "clock-rpm.h"

static DEFINE_SPINLOCK(rpm_clock_lock);

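/*
 * Enable an RPM-controlled clock by re-sending its cached rate request.
 * The active-set and sleep-set votes are each the maximum of this
 * clock's rate and its peer's, since both handles vote on the same
 * underlying RPM resource.
 */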
static int rpm_clk_enable(struct clk *clk)
{
	unsigned long flags;
	struct msm_rpm_iv_pair iv;
	int rc = 0;
	struct rpm_clk *r = to_rpm_clk(clk);
	unsigned long this_khz, this_sleep_khz;
	unsigned long peer_khz = 0, peer_sleep_khz = 0;
	struct rpm_clk *peer = r->peer;

	spin_lock_irqsave(&rpm_clock_lock, flags);

	this_khz = r->last_set_khz;
	/* Don't send requests to the RPM if the rate has not been set. */
	if (this_khz == 0)
		goto out;

	this_sleep_khz = r->last_set_sleep_khz;

	iv.id = r->rpm_clk_id;

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled) {
		peer_khz = peer->last_set_khz;
		peer_sleep_khz = peer->last_set_sleep_khz;
	}

	iv.value = max(this_khz, peer_khz);
	rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
	if (rc)
		goto out;

	iv.value = max(this_sleep_khz, peer_sleep_khz);
	rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_SLEEP, &iv, 1);
	if (rc) {
		iv.value = peer_khz;
		msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
	}

out:
	if (!rc)
		r->enabled = true;

	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return rc;
}

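/*
 * Disable the clock by dropping its contribution from the shared vote.
 * If a rate was previously requested, the active-set and sleep-set
 * requests are re-sent with only the peer clock's rate (zero when the
 * peer is disabled too).
 */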
static void rpm_clk_disable(struct clk *clk)
{
	unsigned long flags;
	struct rpm_clk *r = to_rpm_clk(clk);

	spin_lock_irqsave(&rpm_clock_lock, flags);

	if (r->last_set_khz) {
		struct msm_rpm_iv_pair iv;
		struct rpm_clk *peer = r->peer;
		unsigned long peer_khz = 0, peer_sleep_khz = 0;
		int rc;

		iv.id = r->rpm_clk_id;

		/* Take peer clock's rate into account only if it's enabled. */
		if (peer->enabled) {
			peer_khz = peer->last_set_khz;
			peer_sleep_khz = peer->last_set_sleep_khz;
		}

		iv.value = peer_khz;
		rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
		if (rc)
			goto out;

		iv.value = peer_sleep_khz;
		rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_SLEEP, &iv, 1);
	}
	r->enabled = false;
out:
	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return;
}

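/*
 * Request a new minimum rate. The value is cached so it can be applied
 * at enable time, and is forwarded to the RPM immediately only when the
 * clock is already enabled.
 */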
static int rpm_clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	struct rpm_clk *r = to_rpm_clk(clk);
	unsigned long this_khz, this_sleep_khz;
	int rc = 0;

	this_khz = DIV_ROUND_UP(rate, 1000);

	spin_lock_irqsave(&rpm_clock_lock, flags);

	/* Ignore duplicate requests. */
	if (r->last_set_khz == this_khz)
		goto out;

	/* Active-only clocks don't care what the rate is during sleep. So,
	 * they vote for zero. */
	if (r->active_only)
		this_sleep_khz = 0;
	else
		this_sleep_khz = this_khz;

	if (r->enabled) {
		struct msm_rpm_iv_pair iv;
		struct rpm_clk *peer = r->peer;
		unsigned long peer_khz = 0, peer_sleep_khz = 0;

		iv.id = r->rpm_clk_id;

		/* Take peer clock's rate into account only if it's enabled. */
		if (peer->enabled) {
			peer_khz = peer->last_set_khz;
			peer_sleep_khz = peer->last_set_sleep_khz;
		}

		iv.value = max(this_khz, peer_khz);
		rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
		if (rc)
			goto out;

		iv.value = max(this_sleep_khz, peer_sleep_khz);
		rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_SLEEP, &iv, 1);
	}
	if (!rc) {
		r->last_set_khz = this_khz;
		r->last_set_sleep_khz = this_sleep_khz;
	}

out:
	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return rc;
}

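/*
 * Query the RPM for the clock's current status value. The reported rate
 * is in kHz, so scale it back up to Hz.
 */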
static unsigned long rpm_clk_get_rate(struct clk *clk)
{
	struct rpm_clk *r = to_rpm_clk(clk);
	struct msm_rpm_iv_pair iv = { r->rpm_status_id };
	int rc;

	rc = msm_rpm_get_status(&iv, 1);
	if (rc < 0)
		return rc;
	return iv.value * 1000;
}

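/* A non-zero rate reported by the RPM means the clock is running. */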
static int rpm_clk_is_enabled(struct clk *clk)
{
	return !!(rpm_clk_get_rate(clk));
}

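/*
 * Rate rounding is not supported for RPM clocks, so the requested rate
 * is returned unchanged.
 */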
static long rpm_clk_round_rate(struct clk *clk, unsigned long rate)
{
	/* Not supported. */
	return rate;
}

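/*
 * RPM clocks are managed by the remote RPM processor rather than the
 * local clock controller, so they are never "local".
 */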
static bool rpm_clk_is_local(struct clk *clk)
{
	return false;
}

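/* Clock framework operations shared by all RPM-controlled clocks. */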
struct clk_ops clk_ops_rpm = {
	.enable = rpm_clk_enable,
	.disable = rpm_clk_disable,
	.set_min_rate = rpm_clk_set_min_rate,
	.get_rate = rpm_clk_get_rate,
	.is_enabled = rpm_clk_is_enabled,
	.round_rate = rpm_clk_round_rate,
	.is_local = rpm_clk_is_local,
};