/*
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>

#include <mach/clk-provider.h>
#include <mach/clock-generic.h>

/* ==================== Mux clock ==================== */

int parent_to_src_sel(struct clk_src *parents, int num_parents, struct clk *p)
{
	int i;

	for (i = 0; i < num_parents; i++) {
		if (parents[i].src == p)
			return parents[i].sel;
	}

	return -EINVAL;
}

static int mux_set_parent(struct clk *c, struct clk *p)
{
	struct mux_clk *mux = to_mux_clk(c);
	int sel = parent_to_src_sel(mux->parents, mux->num_parents, p);
	struct clk *old_parent;
	int rc = 0, i;
	unsigned long flags;

	if (sel < 0 && mux->rec_set_par) {
		for (i = 0; i < mux->num_parents; i++) {
			rc = clk_set_parent(mux->parents[i].src, p);
			if (!rc) {
				sel = mux->parents[i].sel;
				/*
				 * This is necessary to ensure prepare/enable
				 * counts get propagated correctly.
				 */
				p = mux->parents[i].src;
				break;
			}
		}
	}

	if (sel < 0)
		return sel;

	rc = __clk_pre_reparent(c, p, &flags);
	if (rc)
		goto out;

	rc = mux->ops->set_mux_sel(mux, sel);
	if (rc)
		goto set_fail;

	old_parent = c->parent;
	c->parent = p;
	c->rate = clk_get_rate(p);
	__clk_post_reparent(c, old_parent, &flags);

	return 0;

set_fail:
	__clk_post_reparent(c, p, &flags);
out:
	return rc;
}

static long mux_round_rate(struct clk *c, unsigned long rate)
{
	struct mux_clk *mux = to_mux_clk(c);
	int i;
	unsigned long prate, rrate = 0;

	for (i = 0; i < mux->num_parents; i++) {
		prate = clk_round_rate(mux->parents[i].src, rate);
		if (is_better_rate(rate, rrate, prate))
			rrate = prate;
	}
	if (!rrate)
		return -EINVAL;

	return rrate;
}

static int mux_set_rate(struct clk *c, unsigned long rate)
{
	struct mux_clk *mux = to_mux_clk(c);
	struct clk *new_parent = NULL;
	int rc = 0, i;
	unsigned long new_par_curr_rate;
	unsigned long flags;

	for (i = 0; i < mux->num_parents; i++) {
		if (clk_round_rate(mux->parents[i].src, rate) == rate) {
			new_parent = mux->parents[i].src;
			break;
		}
	}
	if (new_parent == NULL)
		return -EINVAL;

	/*
	 * Switch to safe parent since the old and new parent might be the
	 * same and the parent might temporarily turn off while switching
	 * rates.
	 */
	if (mux->safe_sel >= 0) {
		/*
		 * Some mux implementations might switch to/from a low power
		 * parent as part of their disable/enable ops. Grab the
		 * enable lock to avoid racing with these implementations.
		 */
		spin_lock_irqsave(&c->lock, flags);
		rc = mux->ops->set_mux_sel(mux, mux->safe_sel);
		spin_unlock_irqrestore(&c->lock, flags);
	}
	if (rc)
		return rc;

	new_par_curr_rate = clk_get_rate(new_parent);
	rc = clk_set_rate(new_parent, rate);
	if (rc)
		goto set_rate_fail;

	rc = mux_set_parent(c, new_parent);
	if (rc)
		goto set_par_fail;

	return 0;

set_par_fail:
	clk_set_rate(new_parent, new_par_curr_rate);
set_rate_fail:
	WARN(mux->ops->set_mux_sel(mux,
		parent_to_src_sel(mux->parents, mux->num_parents, c->parent)),
		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
	return rc;
}

static int mux_enable(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);
	if (mux->ops->enable)
		return mux->ops->enable(mux);
	return 0;
}

static void mux_disable(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);
	if (mux->ops->disable)
		return mux->ops->disable(mux);
}

static struct clk *mux_get_parent(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);
	int sel = mux->ops->get_mux_sel(mux);
	int i;

	for (i = 0; i < mux->num_parents; i++) {
		if (mux->parents[i].sel == sel)
			return mux->parents[i].src;
	}

	/* Unfamiliar parent. */
	return NULL;
}

static enum handoff mux_handoff(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);

	c->rate = clk_get_rate(c->parent);
	mux->safe_sel = parent_to_src_sel(mux->parents, mux->num_parents,
					  mux->safe_parent);

	if (mux->en_mask && mux->ops && mux->ops->is_enabled)
		return mux->ops->is_enabled(mux)
			? HANDOFF_ENABLED_CLK
			: HANDOFF_DISABLED_CLK;

	/*
	 * If this function returns 'enabled' even when the clock downstream
	 * of this clock is disabled, then handoff code will unnecessarily
	 * enable the current parent of this clock. If this function always
	 * returns 'disabled' and a clock downstream is on, the clock handoff
	 * code will bump up the ref count for this clock and its current
	 * parent as necessary. So, clocks without an actual HW gate can
	 * always return disabled.
	 */
	return HANDOFF_DISABLED_CLK;
}

struct clk_ops clk_ops_gen_mux = {
	.enable = mux_enable,
	.disable = mux_disable,
	.set_parent = mux_set_parent,
	.round_rate = mux_round_rate,
	.set_rate = mux_set_rate,
	.handoff = mux_handoff,
	.get_parent = mux_get_parent,
};
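
/*
 * Example (illustrative sketch only, not used by this file): a clock driver
 * could describe a two-input mux with clk_ops_gen_mux roughly as below.
 * Field names mirror their use in this file; the exact structure layouts
 * live in mach/clock-generic.h, and pll0_clk, pll1_clk and my_mux_reg_ops
 * (which would supply set_mux_sel/get_mux_sel) are hypothetical.
 *
 *	static struct mux_clk gfx_src_mux = {
 *		.num_parents = 2,
 *		.parents = (struct clk_src[]) {
 *			{ .src = &pll0_clk, .sel = 0 },
 *			{ .src = &pll1_clk, .sel = 1 },
 *		},
 *		.ops = &my_mux_reg_ops,
 *		.c = {
 *			.dbg_name = "gfx_src_mux",
 *			.ops = &clk_ops_gen_mux,
 *		},
 *	};
 */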

/* ==================== Divider clock ==================== */

static long __div_round_rate(struct div_data *data, unsigned long rate,
	struct clk *parent, unsigned int *best_div, unsigned long *best_prate)
{
	unsigned int div, min_div, max_div, _best_div = 1;
	unsigned long prate, _best_prate = 0, rrate = 0;

	rate = max(rate, 1UL);

	min_div = max(data->min_div, 1U);
	max_div = min(data->max_div, (unsigned int) (ULONG_MAX / rate));

	for (div = min_div; div <= max_div; div++) {
		prate = clk_round_rate(parent, rate * div);
		if (IS_ERR_VALUE(prate))
			break;

		if (is_better_rate(rate, rrate, prate / div)) {
			rrate = prate / div;
			_best_div = div;
			_best_prate = prate;
		}

		/*
		 * Trying higher dividers is only going to ask the parent for
		 * a higher rate. If it can't even output a rate higher than
		 * the one we request for this divider, the parent is not
		 * going to be able to output an even higher rate required
		 * for a higher divider. So, stop trying higher dividers.
		 */
		if (prate / div < rate)
			break;

		if (rrate <= rate + data->rate_margin)
			break;
	}

	if (!rrate)
		return -EINVAL;
	if (best_div)
		*best_div = _best_div;
	if (best_prate)
		*best_prate = _best_prate;

	return rrate;
}
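
/*
 * Worked example for __div_round_rate() (illustrative, assuming
 * is_better_rate() prefers the closest achievable rate at or above the
 * request): with min_div = 1, max_div = 4, rate_margin = 0, a request for
 * 75 MHz, and a parent that rounds requests up to multiples of 100 MHz:
 *
 *	div = 1: clk_round_rate(parent, 75)  = 100 -> 100/1 = 100 (best so far)
 *	div = 2: clk_round_rate(parent, 150) = 200 -> 200/2 = 100 (no better)
 *	div = 3: clk_round_rate(parent, 225) = 300 -> 300/3 = 100 (no better)
 *	div = 4: clk_round_rate(parent, 300) = 300 -> 300/4 = 75  (exact, stop)
 *
 * The result is 75 MHz with *best_div = 4 and *best_prate = 300 MHz.
 */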

static long div_round_rate(struct clk *c, unsigned long rate)
{
	struct div_clk *d = to_div_clk(c);

	return __div_round_rate(&d->data, rate, c->parent, NULL, NULL);
}

static int div_set_rate(struct clk *c, unsigned long rate)
{
	struct div_clk *d = to_div_clk(c);
	unsigned int div;
	int rc = 0;
	long rrate, old_prate, new_prate;
	struct div_data *data = &d->data;

	rrate = __div_round_rate(data, rate, c->parent, &div, &new_prate);
	if (rrate != rate)
		return -EINVAL;

	/*
	 * For a fixed-divider clock we don't want to return an error if the
	 * requested rate matches the achievable rate. So, don't check for
	 * !d->ops and return an error. __div_round_rate() ensures div ==
	 * data->div if !d->ops.
	 */
	if (div > data->div)
		rc = d->ops->set_div(d, div);
	if (rc)
		return rc;

	old_prate = clk_get_rate(c->parent);
	rc = clk_set_rate(c->parent, new_prate);
	if (rc)
		goto set_rate_fail;

	if (div < data->div)
		rc = d->ops->set_div(d, div);
	if (rc)
		goto div_dec_fail;

	data->div = div;

	return 0;

div_dec_fail:
	WARN(clk_set_rate(c->parent, old_prate),
		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
set_rate_fail:
	if (div > data->div)
		WARN(d->ops->set_div(d, data->div),
			"Set rate failed for %s. Also in bad state!\n",
			c->dbg_name);
	return rc;
}

static int div_enable(struct clk *c)
{
	struct div_clk *d = to_div_clk(c);
	if (d->ops && d->ops->enable)
		return d->ops->enable(d);
	return 0;
}

static void div_disable(struct clk *c)
{
	struct div_clk *d = to_div_clk(c);
	if (d->ops && d->ops->disable)
		return d->ops->disable(d);
}

static enum handoff div_handoff(struct clk *c)
{
	struct div_clk *d = to_div_clk(c);
	unsigned int div = d->data.div;

	if (d->ops && d->ops->get_div)
		div = max(d->ops->get_div(d), 1);
	div = max(div, 1U);
	c->rate = clk_get_rate(c->parent) / div;

	if (!d->ops || !d->ops->set_div)
		d->data.min_div = d->data.max_div = div;
	d->data.div = div;

	if (d->en_mask && d->ops && d->ops->is_enabled)
		return d->ops->is_enabled(d)
			? HANDOFF_ENABLED_CLK
			: HANDOFF_DISABLED_CLK;

	/*
	 * If this function returns 'enabled' even when the clock downstream
	 * of this clock is disabled, then handoff code will unnecessarily
	 * enable the current parent of this clock. If this function always
	 * returns 'disabled' and a clock downstream is on, the clock handoff
	 * code will bump up the ref count for this clock and its current
	 * parent as necessary. So, clocks without an actual HW gate can
	 * always return disabled.
	 */
	return HANDOFF_DISABLED_CLK;
}

struct clk_ops clk_ops_div = {
	.enable = div_enable,
	.disable = div_disable,
	.round_rate = div_round_rate,
	.set_rate = div_set_rate,
	.handoff = div_handoff,
};
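
/*
 * Example (illustrative sketch only): a register-programmable divider could
 * plug into clk_ops_div roughly as below. Field names mirror their use in
 * this file; the exact layouts live in mach/clock-generic.h, and
 * my_div_reg_ops (supplying set_div/get_div) is hypothetical. As the
 * comment in div_set_rate() notes, a fixed divider can instead be modeled
 * with min_div == max_div == div and no ops.
 *
 *	static struct div_clk ahb_div = {
 *		.data = {
 *			.min_div = 1,
 *			.max_div = 4,
 *			.div = 2,
 *		},
 *		.ops = &my_div_reg_ops,
 *		.c = {
 *			.dbg_name = "ahb_div",
 *			.ops = &clk_ops_div,
 *		},
 *	};
 */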

static long __slave_div_round_rate(struct clk *c, unsigned long rate,
	int *best_div)
{
	struct div_clk *d = to_div_clk(c);
	unsigned int div, min_div, max_div;
	long p_rate;

	rate = max(rate, 1UL);

	min_div = d->data.min_div;
	max_div = d->data.max_div;

	p_rate = clk_get_rate(c->parent);
	div = p_rate / rate;
	div = max(div, min_div);
	div = min(div, max_div);
	if (best_div)
		*best_div = div;

	return p_rate / div;
}

static long slave_div_round_rate(struct clk *c, unsigned long rate)
{
	return __slave_div_round_rate(c, rate, NULL);
}

static int slave_div_set_rate(struct clk *c, unsigned long rate)
{
	struct div_clk *d = to_div_clk(c);
	int div, rc = 0;
	long rrate;

	rrate = __slave_div_round_rate(c, rate, &div);
	if (rrate != rate)
		return -EINVAL;

	if (div == d->data.div)
		return 0;

	/*
	 * For a fixed-divider clock we don't want to return an error if the
	 * requested rate matches the achievable rate. So, don't check for
	 * !d->ops and return an error. __slave_div_round_rate() ensures
	 * div == d->data.div if !d->ops.
	 */
	rc = d->ops->set_div(d, div);
	if (rc)
		return rc;

	d->data.div = div;

	return 0;
}

static unsigned long slave_div_get_rate(struct clk *c)
{
	struct div_clk *d = to_div_clk(c);
	if (!d->data.div)
		return 0;
	return clk_get_rate(c->parent) / d->data.div;
}

struct clk_ops clk_ops_slave_div = {
	.enable = div_enable,
	.disable = div_disable,
	.round_rate = slave_div_round_rate,
	.set_rate = slave_div_set_rate,
	.get_rate = slave_div_get_rate,
	.handoff = div_handoff,
};
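
/*
 * Design note with a worked example (illustrative): unlike clk_ops_div,
 * the slave divider never calls clk_set_rate() on its parent; it only
 * picks a divisor for whatever rate the parent is already running at, so
 * it suits clocks that share a parent they must not retune. With the
 * parent at 600 MHz and min_div/max_div = 1/4:
 *
 *	slave_div_round_rate(c, 150 MHz): div = 600/150 = 4 -> returns 150 MHz
 *	slave_div_round_rate(c, 170 MHz): div = 600/170 = 3 -> returns 200 MHz
 *
 * so slave_div_set_rate(c, 170 MHz) fails with -EINVAL, since the rounded
 * rate (200 MHz) does not match the request.
 */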

/*
 * External clock
 * Some clock controllers have an input clock signal that comes from outside
 * the clock controller. That input clock signal might then be used as a
 * source for several clocks inside the clock controller. This external
 * clock implementation models such an input clock signal by simply passing
 * requests on to the clock's parent, the original external clock source.
 * The driver for the clock controller should clk_get() the original
 * external clock in its probe function and set it as the parent of this
 * external clock.
 */

static long ext_round_rate(struct clk *c, unsigned long rate)
{
	return clk_round_rate(c->parent, rate);
}

static int ext_set_rate(struct clk *c, unsigned long rate)
{
	return clk_set_rate(c->parent, rate);
}

static unsigned long ext_get_rate(struct clk *c)
{
	return clk_get_rate(c->parent);
}

static int ext_set_parent(struct clk *c, struct clk *p)
{
	return clk_set_parent(c->parent, p);
}

static enum handoff ext_handoff(struct clk *c)
{
	c->rate = clk_get_rate(c->parent);
	/* Similar reasoning as in div_handoff(); see the comment there. */
	return HANDOFF_DISABLED_CLK;
}

struct clk_ops clk_ops_ext = {
	.handoff = ext_handoff,
	.round_rate = ext_round_rate,
	.set_rate = ext_set_rate,
	.get_rate = ext_get_rate,
	.set_parent = ext_set_parent,
};
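
/*
 * Example (illustrative sketch): a controller driver might wire up an
 * external clock in its probe function roughly as below. "xo_in" and the
 * "xo" lookup name are hypothetical, and exactly how the parent pointer is
 * initialized (direct assignment vs. framework helpers) is driver-specific.
 *
 *	struct clk *board_xo = clk_get(dev, "xo");
 *
 *	if (IS_ERR(board_xo))
 *		return PTR_ERR(board_xo);
 *	xo_in.c.parent = board_xo;
 */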

/* ==================== Mux_div clock ==================== */

static int mux_div_clk_enable(struct clk *c)
{
	struct mux_div_clk *md = to_mux_div_clk(c);

	if (md->ops->enable)
		return md->ops->enable(md);
	return 0;
}

static void mux_div_clk_disable(struct clk *c)
{
	struct mux_div_clk *md = to_mux_div_clk(c);

	if (md->ops->disable)
		return md->ops->disable(md);
}

static long __mux_div_round_rate(struct clk *c, unsigned long rate,
	struct clk **best_parent, u32 *best_div, unsigned long *best_prate)
{
	struct mux_div_clk *md = to_mux_div_clk(c);
	unsigned int i;
	unsigned long rrate, best = 0, _best_div = 0, _best_prate = 0;
	struct clk *_best_parent = NULL;

	for (i = 0; i < md->num_parents; i++) {
		unsigned int div;
		unsigned long prate;

		rrate = __div_round_rate(&md->data, rate, md->parents[i].src,
				&div, &prate);

		if (is_better_rate(rate, best, rrate)) {
			best = rrate;
			_best_div = div;
			_best_prate = prate;
			_best_parent = md->parents[i].src;
		}

		if (rate <= rrate && rrate <= rate + md->data.rate_margin)
			break;
	}

	if (best_div)
		*best_div = _best_div;
	if (best_prate)
		*best_prate = _best_prate;
	if (best_parent)
		*best_parent = _best_parent;

	if (best)
		return best;
	return -EINVAL;
}

static long mux_div_clk_round_rate(struct clk *c, unsigned long rate)
{
	return __mux_div_round_rate(c, rate, NULL, NULL, NULL);
}

/* Requires the enable lock to be held. */
static int __set_src_div(struct mux_div_clk *md, struct clk *parent, u32 div)
{
	int rc = 0;
	u32 src_sel;

	src_sel = parent_to_src_sel(md->parents, md->num_parents, parent);
	/*
	 * If the clock is disabled, don't change to the new settings until
	 * the clock is re-enabled.
	 */
	if (md->c.count)
		rc = md->ops->set_src_div(md, src_sel, div);
	if (!rc) {
		md->data.div = div;
		md->src_sel = src_sel;
	}

	return rc;
}

static int set_src_div(struct mux_div_clk *md, struct clk *parent, u32 div)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&md->c.lock, flags);
	rc = __set_src_div(md, parent, div);
	spin_unlock_irqrestore(&md->c.lock, flags);

	return rc;
}

/* Must be called after handoff to ensure parent clock rates are initialized */
static int safe_parent_init_once(struct clk *c)
{
	unsigned long rrate;
	u32 best_div;
	struct clk *best_parent;
	struct mux_div_clk *md = to_mux_div_clk(c);

	if (IS_ERR(md->safe_parent))
		return -EINVAL;
	if (!md->safe_freq || md->safe_parent)
		return 0;

	rrate = __mux_div_round_rate(c, md->safe_freq, &best_parent,
			&best_div, NULL);

	if (rrate == md->safe_freq) {
		md->safe_div = best_div;
		md->safe_parent = best_parent;
	} else {
		md->safe_parent = ERR_PTR(-EINVAL);
		return -EINVAL;
	}
	return 0;
}
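
/*
 * Example (illustrative): if safe_freq is 19200000 and one of the parents
 * is a 19.2 MHz XO, the first successful call to safe_parent_init_once()
 * latches safe_parent to that XO and, assuming min_div permits it,
 * safe_div to 1. Every subsequent rate switch then detours through XO/1
 * while the mux and divider are reprogrammed. If no parent/divider
 * combination can hit safe_freq exactly, safe_parent is poisoned with
 * ERR_PTR(-EINVAL) so that later set_rate calls fail fast.
 */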

static int mux_div_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct mux_div_clk *md = to_mux_div_clk(c);
	unsigned long flags, rrate;
	unsigned long new_prate, old_prate;
	struct clk *old_parent, *new_parent;
	u32 new_div, old_div;
	int rc;

	rc = safe_parent_init_once(c);
	if (rc)
		return rc;

	rrate = __mux_div_round_rate(c, rate, &new_parent, &new_div,
			&new_prate);
	if (rrate != rate)
		return -EINVAL;

	old_parent = c->parent;
	old_div = md->data.div;
	old_prate = clk_get_rate(c->parent);

	/* Refer to the description of safe_freq in clock-generic.h */
	if (md->safe_freq)
		rc = set_src_div(md, md->safe_parent, md->safe_div);

	else if (new_parent == old_parent && new_div >= old_div) {
		/*
		 * If both the parent rate and the divider change, there may
		 * be an intermediate frequency generated. Ensure this
		 * intermediate frequency is less than both the new rate and
		 * the previous rate.
		 */
		rc = set_src_div(md, old_parent, new_div);
	}
	if (rc)
		return rc;

	rc = clk_set_rate(new_parent, new_prate);
	if (rc) {
		pr_err("failed to set %s to %lu\n",
			new_parent->dbg_name, new_prate);
		goto err_set_rate;
	}

	rc = __clk_pre_reparent(c, new_parent, &flags);
	if (rc)
		goto err_pre_reparent;

	/* Set the divider and mux src atomically */
	rc = __set_src_div(md, new_parent, new_div);
	if (rc)
		goto err_set_src_div;

	c->parent = new_parent;

	__clk_post_reparent(c, old_parent, &flags);
	return 0;

err_set_src_div:
	/* Not switching to new_parent, so disable it */
	__clk_post_reparent(c, new_parent, &flags);
err_pre_reparent:
	rc = clk_set_rate(old_parent, old_prate);
	WARN(rc, "%s: error changing parent (%s) rate to %lu\n",
		c->dbg_name, old_parent->dbg_name, old_prate);
err_set_rate:
	rc = set_src_div(md, old_parent, old_div);
	WARN(rc, "%s: error changing back to original div (%u) and parent (%s)\n",
		c->dbg_name, old_div, old_parent->dbg_name);

	return rc;
}

static struct clk *mux_div_clk_get_parent(struct clk *c)
{
	struct mux_div_clk *md = to_mux_div_clk(c);
	u32 i, div, src_sel;

	md->ops->get_src_div(md, &src_sel, &div);

	md->data.div = div;
	md->src_sel = src_sel;

	for (i = 0; i < md->num_parents; i++) {
		if (md->parents[i].sel == src_sel)
			return md->parents[i].src;
	}

	return NULL;
}

static enum handoff mux_div_clk_handoff(struct clk *c)
{
	struct mux_div_clk *md = to_mux_div_clk(c);
	unsigned long parent_rate;

	parent_rate = clk_get_rate(c->parent);
	c->rate = parent_rate / md->data.div;

	if (!md->ops->is_enabled)
		return HANDOFF_DISABLED_CLK;
	if (md->ops->is_enabled(md))
		return HANDOFF_ENABLED_CLK;
	return HANDOFF_DISABLED_CLK;
}

struct clk_ops clk_ops_mux_div_clk = {
	.enable = mux_div_clk_enable,
	.disable = mux_div_clk_disable,
	.set_rate = mux_div_clk_set_rate,
	.round_rate = mux_div_clk_round_rate,
	.get_parent = mux_div_clk_get_parent,
	.handoff = mux_div_clk_handoff,
};
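
/*
 * Example (illustrative sketch only): putting the pieces together, an
 * RCG-style mux-with-divider could be described roughly as below. All
 * names are hypothetical; field names mirror their use in this file and
 * the exact layouts live in mach/clock-generic.h. my_mux_div_reg_ops
 * would supply set_src_div/get_src_div (and optionally is_enabled).
 *
 *	static struct mux_div_clk gpu_rcg = {
 *		.num_parents = 2,
 *		.parents = (struct clk_src[]) {
 *			{ .src = &xo.c,   .sel = 0 },
 *			{ .src = &pll2.c, .sel = 2 },
 *		},
 *		.data = {
 *			.min_div = 1,
 *			.max_div = 16,
 *			.div = 1,
 *		},
 *		.safe_freq = 19200000,
 *		.ops = &my_mux_div_reg_ops,
 *		.c = {
 *			.dbg_name = "gpu_rcg",
 *			.ops = &clk_ops_mux_div_clk,
 *		},
 *	};
 */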