/*
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>

#include <linux/clk.h>
#include <mach/clk-provider.h>
#include <mach/clock-generic.h>

/* ==================== Mux clock ==================== */

static int parent_to_src_sel(struct mux_clk *mux, struct clk *p)
{
	int i;

	for (i = 0; i < mux->num_parents; i++) {
		if (mux->parents[i].src == p)
			return mux->parents[i].sel;
	}

	return -EINVAL;
}

static int mux_set_parent(struct clk *c, struct clk *p)
{
	struct mux_clk *mux = to_mux_clk(c);
	int sel = parent_to_src_sel(mux, p);
	struct clk *old_parent;
	int rc = 0;
	unsigned long flags;

	if (sel < 0)
		return sel;

	rc = __clk_pre_reparent(c, p, &flags);
	if (rc)
		goto out;

	rc = mux->ops->set_mux_sel(mux, sel);
	if (rc)
		goto set_fail;

	old_parent = c->parent;
	c->parent = p;
	__clk_post_reparent(c, old_parent, &flags);

	return 0;

set_fail:
	__clk_post_reparent(c, p, &flags);
out:
	return rc;
}

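/*
 * Round to the lowest parent rate that satisfies the request, falling
 * back to the best rate below it. E.g. (illustrative numbers): with
 * parents fixed at 200 MHz and 300 MHz, a request for 250 MHz rounds
 * to 300 MHz; a request for 350 MHz, which no parent can meet, rounds
 * down to 300 MHz (max_prate).
 */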
static long mux_round_rate(struct clk *c, unsigned long rate)
{
	struct mux_clk *mux = to_mux_clk(c);
	int i;
	unsigned long prate, max_prate = 0, rrate = ULONG_MAX;

	for (i = 0; i < mux->num_parents; i++) {
		prate = clk_round_rate(mux->parents[i].src, rate);
		if (IS_ERR_VALUE(prate))
			continue;
		if (prate < rate) {
			max_prate = max(prate, max_prate);
			continue;
		}

		rrate = min(rrate, prate);
	}
	if (rrate == ULONG_MAX)
		rrate = max_prate;

	return rrate ? rrate : -EINVAL;
}

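/*
 * Set the rate by moving to the first parent that can produce it
 * exactly. The switch is bounced through the safe parent (if one was
 * given) so the output never runs off a parent that is mid-way through
 * its own rate change.
 */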
static int mux_set_rate(struct clk *c, unsigned long rate)
{
	struct mux_clk *mux = to_mux_clk(c);
	struct clk *new_parent = NULL;
	int rc = 0, i;
	unsigned long new_par_curr_rate;

	for (i = 0; i < mux->num_parents; i++) {
		if (clk_round_rate(mux->parents[i].src, rate) == rate) {
			new_parent = mux->parents[i].src;
			break;
		}
	}
	if (new_parent == NULL)
		return -EINVAL;

	/*
	 * Switch to safe parent since the old and new parent might be the
	 * same and the parent might temporarily turn off while switching
	 * rates.
	 */
	if (mux->safe_sel >= 0)
		rc = mux->ops->set_mux_sel(mux, mux->safe_sel);
	if (rc)
		return rc;

	new_par_curr_rate = clk_get_rate(new_parent);
	rc = clk_set_rate(new_parent, rate);
	if (rc)
		goto set_rate_fail;

	rc = mux_set_parent(c, new_parent);
	if (rc)
		goto set_par_fail;

	return 0;

set_par_fail:
	clk_set_rate(new_parent, new_par_curr_rate);
set_rate_fail:
	WARN(mux->ops->set_mux_sel(mux, parent_to_src_sel(mux, c->parent)),
		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
	return rc;
}

static int mux_enable(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);

	if (mux->ops->enable)
		return mux->ops->enable(mux);
	return 0;
}

static void mux_disable(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);

	if (mux->ops->disable)
		return mux->ops->disable(mux);
}

static struct clk *mux_get_parent(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);
	int sel = mux->ops->get_mux_sel(mux);
	int i;

	for (i = 0; i < mux->num_parents; i++) {
		if (mux->parents[i].sel == sel)
			return mux->parents[i].src;
	}

	/* Unfamiliar parent. */
	return NULL;
}

static enum handoff mux_handoff(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);

	c->rate = clk_get_rate(c->parent);
	mux->safe_sel = parent_to_src_sel(mux, mux->safe_parent);

	if (mux->en_mask && mux->ops && mux->ops->is_enabled)
		return mux->ops->is_enabled(mux)
			? HANDOFF_ENABLED_CLK
			: HANDOFF_DISABLED_CLK;

	/*
	 * If this function returns 'enabled' even when the clock downstream
	 * of this clock is disabled, then handoff code will unnecessarily
	 * enable the current parent of this clock. If this function always
	 * returns 'disabled' and a clock downstream is on, the clock handoff
	 * code will bump up the ref count for this clock and its current
	 * parent as necessary. So, clocks without an actual HW gate can
	 * always return disabled.
	 */
	return HANDOFF_DISABLED_CLK;
}

struct clk_ops clk_ops_gen_mux = {
	.enable = mux_enable,
	.disable = mux_disable,
	.set_parent = mux_set_parent,
	.round_rate = mux_round_rate,
	.set_rate = mux_set_rate,
	.handoff = mux_handoff,
	.get_parent = mux_get_parent,
};
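
/*
 * Example of the hardware-specific half these helpers expect: a
 * minimal, non-compiled sketch only. The clk_mux_ops type name is
 * assumed from the companion mach/clock-generic.h header; my_mux_base
 * and the register layout are hypothetical, not a real SoC's.
 */
#if 0
static void __iomem *my_mux_base;	/* hypothetical MMIO mapping */

static int my_set_mux_sel(struct mux_clk *mux, int sel)
{
	u32 regval = readl_relaxed(my_mux_base);

	regval &= ~0x7;			/* hypothetical 3-bit select field */
	regval |= sel & 0x7;
	writel_relaxed(regval, my_mux_base);

	return 0;
}

static int my_get_mux_sel(struct mux_clk *mux)
{
	return readl_relaxed(my_mux_base) & 0x7;
}

static struct clk_mux_ops my_mux_ops = {
	.set_mux_sel = my_set_mux_sel,
	.get_mux_sel = my_get_mux_sel,
};
#endif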

/* ==================== Divider clock ==================== */

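/*
 * Walk the dividers from smallest to largest, asking the parent for
 * rate * div each time, and keep the combination that lands closest
 * to (but not below) the request. E.g. (illustrative numbers): with a
 * parent fixed at 200 MHz, a 100 MHz request tries div = 1 (200 MHz),
 * then div = 2 (exactly 100 MHz) and stops there with *best_div = 2.
 */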
static long __div_round_rate(struct clk *c, unsigned long rate, int *best_div)
{
	struct div_clk *d = to_div_clk(c);
	unsigned int div, min_div, max_div;
	unsigned long p_rrate, rrate = ULONG_MAX;

	rate = max(rate, 1UL);

	if (!d->ops || !d->ops->set_div)
		min_div = max_div = d->div;
	else {
		min_div = max(d->min_div, 1U);
		max_div = min(d->max_div, (unsigned int) (ULONG_MAX / rate));
	}

	for (div = min_div; div <= max_div; div++) {
		p_rrate = clk_round_rate(c->parent, rate * div);
		if (IS_ERR_VALUE(p_rrate))
			break;

		p_rrate /= div;
		/*
		 * Trying higher dividers is only going to ask the parent for
		 * a higher rate. If it can't even output a rate higher than
		 * the one we request for this divider, the parent is not
		 * going to be able to output an even higher rate required
		 * for a higher divider. So, stop trying higher dividers.
		 */
		if (p_rrate < rate) {
			if (rrate == ULONG_MAX) {
				rrate = p_rrate;
				if (best_div)
					*best_div = div;
			}
			break;
		}
		if (p_rrate < rrate) {
			rrate = p_rrate;
			if (best_div)
				*best_div = div;
		}

		if (rrate <= rate + d->rate_margin)
			break;
	}

	if (rrate == ULONG_MAX)
		return -EINVAL;

	return rrate;
}

static long div_round_rate(struct clk *c, unsigned long rate)
{
	return __div_round_rate(c, rate, NULL);
}

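/*
 * Order matters here: the divider is raised before the parent speeds
 * up, and lowered only after the parent has slowed down, so the output
 * never overshoots the higher of the old and new rates while the
 * switch is in flight.
 */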
static int div_set_rate(struct clk *c, unsigned long rate)
{
	struct div_clk *d = to_div_clk(c);
	int div, rc = 0;
	long rrate, old_prate;

	rrate = __div_round_rate(c, rate, &div);
	if (rrate != rate)
		return -EINVAL;

	/*
	 * For fixed divider clock we don't want to return an error if the
	 * requested rate matches the achievable rate. So, don't check for
	 * !d->ops and return an error. __div_round_rate() ensures div ==
	 * d->div if !d->ops.
	 */
	if (div > d->div)
		rc = d->ops->set_div(d, div);
	if (rc)
		return rc;

	old_prate = clk_get_rate(c->parent);
	rc = clk_set_rate(c->parent, rate * div);
	if (rc)
		goto set_rate_fail;

	if (div < d->div)
		rc = d->ops->set_div(d, div);
	if (rc)
		goto div_dec_fail;

	d->div = div;

	return 0;

div_dec_fail:
	WARN(clk_set_rate(c->parent, old_prate),
		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
set_rate_fail:
	if (div > d->div)
		WARN(d->ops->set_div(d, d->div),
			"Set rate failed for %s. Also in bad state!\n",
			c->dbg_name);
	return rc;
}

static int div_enable(struct clk *c)
{
	struct div_clk *d = to_div_clk(c);

	if (d->ops && d->ops->enable)
		return d->ops->enable(d);
	return 0;
}

static void div_disable(struct clk *c)
{
	struct div_clk *d = to_div_clk(c);

	if (d->ops && d->ops->disable)
		return d->ops->disable(d);
}

static enum handoff div_handoff(struct clk *c)
{
	struct div_clk *d = to_div_clk(c);

	if (d->ops && d->ops->get_div)
		d->div = max(d->ops->get_div(d), 1);
	d->div = max(d->div, 1U);
	c->rate = clk_get_rate(c->parent) / d->div;

	if (d->en_mask && d->ops && d->ops->is_enabled)
		return d->ops->is_enabled(d)
			? HANDOFF_ENABLED_CLK
			: HANDOFF_DISABLED_CLK;

	/*
	 * If this function returns 'enabled' even when the clock downstream
	 * of this clock is disabled, then handoff code will unnecessarily
	 * enable the current parent of this clock. If this function always
	 * returns 'disabled' and a clock downstream is on, the clock handoff
	 * code will bump up the ref count for this clock and its current
	 * parent as necessary. So, clocks without an actual HW gate can
	 * always return disabled.
	 */
	return HANDOFF_DISABLED_CLK;
}

struct clk_ops clk_ops_div = {
	.enable = div_enable,
	.disable = div_disable,
	.round_rate = div_round_rate,
	.set_rate = div_set_rate,
	.handoff = div_handoff,
};

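/* ==================== Slave divider clock ==================== */

/*
 * Unlike clk_ops_div above, a slave divider never touches its parent:
 * it only picks the divider that gets closest to the requested rate
 * given whatever rate the parent is currently running at. E.g. with
 * the parent at 300 MHz, a 100 MHz request yields div = 3.
 */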
static long __slave_div_round_rate(struct clk *c, unsigned long rate,
					int *best_div)
{
	struct div_clk *d = to_div_clk(c);
	unsigned int div, min_div, max_div;
	long p_rate;

	rate = max(rate, 1UL);

	if (!d->ops || !d->ops->set_div)
		min_div = max_div = d->div;
	else {
		min_div = d->min_div;
		max_div = d->max_div;
	}

	p_rate = clk_get_rate(c->parent);
	div = p_rate / rate;
	div = max(div, min_div);
	div = min(div, max_div);
	if (best_div)
		*best_div = div;

	return p_rate / div;
}

static long slave_div_round_rate(struct clk *c, unsigned long rate)
{
	return __slave_div_round_rate(c, rate, NULL);
}

static int slave_div_set_rate(struct clk *c, unsigned long rate)
{
	struct div_clk *d = to_div_clk(c);
	int div, rc = 0;
	long rrate;

	rrate = __slave_div_round_rate(c, rate, &div);
	if (rrate != rate)
		return -EINVAL;

	if (div == d->div)
		return 0;

	/*
	 * For fixed divider clock we don't want to return an error if the
	 * requested rate matches the achievable rate. So, don't check for
	 * !d->ops and return an error. __slave_div_round_rate() ensures
	 * div == d->div if !d->ops.
	 */
	rc = d->ops->set_div(d, div);
	if (rc)
		return rc;

	d->div = div;

	return 0;
}

struct clk_ops clk_ops_slave_div = {
	.enable = div_enable,
	.disable = div_disable,
	.round_rate = slave_div_round_rate,
	.set_rate = slave_div_set_rate,
	.handoff = div_handoff,
};
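
/*
 * As with the mux example above, the divider helpers expect a small
 * hardware-specific backend. A minimal, non-compiled sketch only: the
 * clk_div_ops type name is assumed from the companion
 * mach/clock-generic.h header, and my_div_base plus the register
 * layout are hypothetical.
 */
#if 0
static void __iomem *my_div_base;	/* hypothetical MMIO mapping */

static int my_set_div(struct div_clk *d, int div)
{
	/* Hypothetical hardware encodes divider N as N - 1 in 4 bits. */
	writel_relaxed((div - 1) & 0xf, my_div_base);

	return 0;
}

static int my_get_div(struct div_clk *d)
{
	return (readl_relaxed(my_div_base) & 0xf) + 1;
}

static struct clk_div_ops my_div_ops = {
	.set_div = my_set_div,
	.get_div = my_get_div,
};
#endif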