/*
 * Copyright (C) 2013 Broadcom Corporation
 * Copyright 2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "clk-kona.h"

#include <linux/delay.h>

#define CCU_ACCESS_PASSWORD      0xA5A500
#define CLK_GATE_DELAY_LOOP      2000

/* Bitfield operations */

/* Produces a mask of set bits covering a range of a 32-bit value */
static inline u32 bitfield_mask(u32 shift, u32 width)
{
        return ((1 << width) - 1) << shift;
}

/* Extract the value of a bitfield found within a given register value */
static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width)
{
        return (reg_val & bitfield_mask(shift, width)) >> shift;
}

/* Replace the value of a bitfield found within a given register value */
static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val)
{
        u32 mask = bitfield_mask(shift, width);

        return (reg_val & ~mask) | (val << shift);
}
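
/*
 * Worked example (illustrative): for a 3-bit field at shift 4,
 * bitfield_mask(4, 3) == 0x70, bitfield_extract(0x65, 4, 3) == 6, and
 * bitfield_replace(0x65, 4, 3, 2) == 0x25. Note that bitfield_replace()
 * does not mask "val"; callers must ensure the value fits in the field.
 */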

/* Divider and scaling helpers */

/*
 * Implement DIV_ROUND_CLOSEST() for 64-bit dividend and both values
 * unsigned. Note that unlike do_div(), the remainder is discarded
 * and the return value is the quotient (not the remainder).
 */
u64 do_div_round_closest(u64 dividend, unsigned long divisor)
{
        u64 result;

        result = dividend + ((u64)divisor >> 1);
        (void)do_div(result, divisor);

        return result;
}
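
/*
 * For example (illustrative): do_div_round_closest(7, 2) == 4, and
 * do_div_round_closest(10, 4) == 3, since 2.5 rounds up to 3.
 */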

/* Convert a divider into the scaled divisor value it represents. */
static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
{
        return (u64)reg_div + ((u64)1 << div->frac_width);
}

/*
 * Build a scaled divider value as close as possible to the
 * given whole part (div_value) and fractional part (expressed
 * in billionths).
 */
u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
{
        u64 combined;

        BUG_ON(!div_value);
        BUG_ON(billionths >= BILLION);

        combined = (u64)div_value * BILLION + billionths;
        combined <<= div->frac_width;

        return do_div_round_closest(combined, BILLION);
}
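
/*
 * Worked example (illustrative): with frac_width == 2, requesting a
 * divisor of 2.5 via scaled_div_build(div, 2, 500000000) yields the
 * scaled value (2500000000 << 2) / 1000000000 == 10, which corresponds
 * to a register divider field of 10 - (1 << 2) == 6.
 */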

/* The scaled minimum divisor representable by a divider */
static inline u64
scaled_div_min(struct bcm_clk_div *div)
{
        if (divider_is_fixed(div))
                return (u64)div->fixed;

        return scaled_div_value(div, 0);
}

/* The scaled maximum divisor representable by a divider */
u64 scaled_div_max(struct bcm_clk_div *div)
{
        u32 reg_div;

        if (divider_is_fixed(div))
                return (u64)div->fixed;

        reg_div = ((u32)1 << div->width) - 1;

        return scaled_div_value(div, reg_div);
}

/*
 * Convert a scaled divisor into its divider representation as
 * stored in a divider register field.
 */
static inline u32
divider(struct bcm_clk_div *div, u64 scaled_div)
{
        BUG_ON(scaled_div < scaled_div_min(div));
        BUG_ON(scaled_div > scaled_div_max(div));

        return (u32)(scaled_div - ((u64)1 << div->frac_width));
}

/* Return a rate scaled for use when dividing by a scaled divisor. */
static inline u64
scale_rate(struct bcm_clk_div *div, u32 rate)
{
        if (divider_is_fixed(div))
                return (u64)rate;

        return (u64)rate << div->frac_width;
}
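
/*
 * Putting these together (illustrative): with frac_width == 2, a
 * 100 MHz rate scales to 400000000, and dividing that by the scaled
 * divisor 10 from the example above gives 40 MHz, i.e. an effective
 * divide-by-2.5.
 */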

/* CCU access */

/* Read a 32-bit register value from a CCU's address space. */
static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset)
{
        return readl(ccu->base + reg_offset);
}

/* Write a 32-bit register value into a CCU's address space. */
static inline void
__ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val)
{
        writel(reg_val, ccu->base + reg_offset);
}

static inline unsigned long ccu_lock(struct ccu_data *ccu)
{
        unsigned long flags;

        spin_lock_irqsave(&ccu->lock, flags);

        return flags;
}

static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags)
{
        spin_unlock_irqrestore(&ccu->lock, flags);
}

/*
 * Enable/disable write access to CCU protected registers. The
 * WR_ACCESS register for all CCUs is at offset 0.
 */
static inline void __ccu_write_enable(struct ccu_data *ccu)
{
        if (ccu->write_enabled) {
                pr_err("%s: access already enabled for %s\n", __func__,
                        ccu->name);
                return;
        }
        ccu->write_enabled = true;
        __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1);
}

static inline void __ccu_write_disable(struct ccu_data *ccu)
{
        if (!ccu->write_enabled) {
                pr_err("%s: access wasn't enabled for %s\n", __func__,
                        ccu->name);
                return;
        }

        __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD);
        ccu->write_enabled = false;
}

/*
 * Poll a register in a CCU's address space, returning when the
 * specified bit in that register's value is set (or clear). Delay
 * a microsecond after each read of the register. Returns true if
 * successful, or false if we gave up trying.
 *
 * Caller must ensure the CCU lock is held.
 */
static inline bool
__ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want)
{
        unsigned int tries;
        u32 bit_mask = 1 << bit;

        for (tries = 0; tries < CLK_GATE_DELAY_LOOP; tries++) {
                u32 val;
                bool bit_val;

                val = __ccu_read(ccu, reg_offset);
                bit_val = (val & bit_mask) != 0;
                if (bit_val == want)
                        return true;
                udelay(1);
        }
        return false;
}
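
/*
 * With CLK_GATE_DELAY_LOOP == 2000 and a 1 microsecond delay per
 * iteration, __ccu_wait_bit() gives up after roughly 2 ms (plus
 * register read overhead).
 */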

/* Gate operations */

/* Determine whether a clock is gated. CCU lock must be held. */
static bool
__is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
        u32 bit_mask;
        u32 reg_val;

        /* If there is no gate we can assume it's enabled. */
        if (!gate_exists(gate))
                return true;

        bit_mask = 1 << gate->status_bit;
        reg_val = __ccu_read(ccu, gate->offset);

        return (reg_val & bit_mask) != 0;
}

/* Determine whether a clock is gated. */
static bool
is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
        unsigned long flags;
        bool ret;

        /* Avoid taking the lock if we can */
        if (!gate_exists(gate))
                return true;

        flags = ccu_lock(ccu);
        ret = __is_clk_gate_enabled(ccu, gate);
        ccu_unlock(ccu, flags);

        return ret;
}

/*
 * Commit our desired gate state to the hardware.
 * Returns true if successful, false otherwise.
 */
static bool
__gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
        u32 reg_val;
        u32 mask;
        bool enabled = false;

        BUG_ON(!gate_exists(gate));
        if (!gate_is_sw_controllable(gate))
                return true;            /* Nothing we can change */

        reg_val = __ccu_read(ccu, gate->offset);

        /* For a hardware/software gate, set which is in control */
        if (gate_is_hw_controllable(gate)) {
                mask = (u32)1 << gate->hw_sw_sel_bit;
                if (gate_is_sw_managed(gate))
                        reg_val |= mask;
                else
                        reg_val &= ~mask;
        }

        /*
         * If software is in control, enable or disable the gate.
         * If hardware is, clear the enabled bit for good measure.
         * If a software controlled gate can't be disabled, we're
         * required to write a 0 into the enable bit (but the gate
         * will be enabled).
         */
        mask = (u32)1 << gate->en_bit;
        if (gate_is_sw_managed(gate) && (enabled = gate_is_enabled(gate)) &&
            !gate_is_no_disable(gate))
                reg_val |= mask;
        else
                reg_val &= ~mask;

        __ccu_write(ccu, gate->offset, reg_val);

        /* For a hardware controlled gate, we're done */
        if (!gate_is_sw_managed(gate))
                return true;

        /* Otherwise wait for the gate to be in desired state */
        return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled);
}

/*
 * Initialize a gate. Our desired state (hardware/software select,
 * and if software, its enable state) is committed to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
        if (!gate_exists(gate))
                return true;
        return __gate_commit(ccu, gate);
}

/*
 * Set a gate to enabled or disabled state. Does nothing if the
 * gate is not currently under software control, or if it is already
 * in the requested state. Returns true if successful, false
 * otherwise. CCU lock must be held.
 */
static bool
__clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable)
{
        bool ret;

        if (!gate_exists(gate) || !gate_is_sw_managed(gate))
                return true;    /* Nothing to do */

        if (!enable && gate_is_no_disable(gate)) {
                pr_warn("%s: invalid gate disable request (ignoring)\n",
                        __func__);
                return true;
        }

        if (enable == gate_is_enabled(gate))
                return true;    /* No change */

        gate_flip_enabled(gate);
        ret = __gate_commit(ccu, gate);
        if (!ret)
                gate_flip_enabled(gate);        /* Revert the change */

        return ret;
}

/* Enable or disable a gate. Returns 0 if successful, -EIO otherwise */
static int clk_gate(struct ccu_data *ccu, const char *name,
                        struct bcm_clk_gate *gate, bool enable)
{
        unsigned long flags;
        bool success;

        /*
         * Avoid taking the lock if we can. We quietly ignore
         * requests to change state that don't make sense.
         */
        if (!gate_exists(gate) || !gate_is_sw_managed(gate))
                return 0;
        if (!enable && gate_is_no_disable(gate))
                return 0;

        flags = ccu_lock(ccu);
        __ccu_write_enable(ccu);

        success = __clk_gate(ccu, gate, enable);

        __ccu_write_disable(ccu);
        ccu_unlock(ccu, flags);

        if (success)
                return 0;

        pr_err("%s: failed to %s gate for %s\n", __func__,
                enable ? "enable" : "disable", name);

        return -EIO;
}

/* Trigger operations */

/*
 * Caller must ensure CCU lock is held and access is enabled.
 * Returns true if successful, false otherwise.
 */
static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig)
{
        /* Trigger the clock and wait for it to finish */
        __ccu_write(ccu, trig->offset, 1 << trig->bit);

        return __ccu_wait_bit(ccu, trig->offset, trig->bit, false);
}

/* Divider operations */

/* Read a divider value and return the scaled divisor it represents. */
static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
{
        unsigned long flags;
        u32 reg_val;
        u32 reg_div;

        if (divider_is_fixed(div))
                return (u64)div->fixed;

        flags = ccu_lock(ccu);
        reg_val = __ccu_read(ccu, div->offset);
        ccu_unlock(ccu, flags);

        /* Extract the full divider field from the register value */
        reg_div = bitfield_extract(reg_val, div->shift, div->width);

        /* Return the scaled divisor value it represents */
        return scaled_div_value(div, reg_div);
}

/*
 * Convert a divider's scaled divisor value into its recorded form
 * and commit it into the hardware divider register.
 *
 * Returns 0 on success. Returns -EINVAL for invalid arguments.
 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
 */
static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
                        struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
        bool enabled;
        u32 reg_div;
        u32 reg_val;
        int ret = 0;

        BUG_ON(divider_is_fixed(div));

        /*
         * If we're just initializing the divider, and no initial
         * state was defined in the device tree, we just find out
         * what its current value is rather than updating it.
         */
        if (div->scaled_div == BAD_SCALED_DIV_VALUE) {
                reg_val = __ccu_read(ccu, div->offset);
                reg_div = bitfield_extract(reg_val, div->shift, div->width);
                div->scaled_div = scaled_div_value(div, reg_div);

                return 0;
        }

        /* Convert the scaled divisor to the value we need to record */
        reg_div = divider(div, div->scaled_div);

        /* Clock needs to be enabled before changing the rate */
        enabled = __is_clk_gate_enabled(ccu, gate);
        if (!enabled && !__clk_gate(ccu, gate, true)) {
                ret = -ENXIO;
                goto out;
        }

        /* Replace the divider value and record the result */
        reg_val = __ccu_read(ccu, div->offset);
        reg_val = bitfield_replace(reg_val, div->shift, div->width, reg_div);
        __ccu_write(ccu, div->offset, reg_val);

        /* If the trigger fails we still want to disable the gate */
        if (!__clk_trigger(ccu, trig))
                ret = -EIO;

        /* Disable the clock again if it was disabled to begin with */
        if (!enabled && !__clk_gate(ccu, gate, false))
                ret = ret ? ret : -ENXIO;       /* return first error */
out:
        return ret;
}

/*
 * Initialize a divider by committing our desired state to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
                        struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
        if (!divider_exists(div) || divider_is_fixed(div))
                return true;
        return !__div_commit(ccu, gate, div, trig);
}

static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
                        struct bcm_clk_div *div, struct bcm_clk_trig *trig,
                        u64 scaled_div)
{
        unsigned long flags;
        u64 previous;
        int ret;

        BUG_ON(divider_is_fixed(div));

        previous = div->scaled_div;
        if (previous == scaled_div)
                return 0;       /* No change */

        div->scaled_div = scaled_div;

        flags = ccu_lock(ccu);
        __ccu_write_enable(ccu);

        ret = __div_commit(ccu, gate, div, trig);

        __ccu_write_disable(ccu);
        ccu_unlock(ccu, flags);

        if (ret)
                div->scaled_div = previous;     /* Revert the change */

        return ret;
}

/* Common clock rate helpers */

/*
 * Implement the common clock framework recalc_rate method, taking
 * into account a divider and an optional pre-divider. The
 * pre-divider register pointer may be NULL.
 */
static unsigned long clk_recalc_rate(struct ccu_data *ccu,
                        struct bcm_clk_div *div, struct bcm_clk_div *pre_div,
                        unsigned long parent_rate)
{
        u64 scaled_parent_rate;
        u64 scaled_div;
        u64 result;

        if (!divider_exists(div))
                return parent_rate;

        if (parent_rate > (unsigned long)LONG_MAX)
                return 0;       /* actually this would be a caller bug */

        /*
         * If there is a pre-divider, divide the scaled parent rate
         * by the pre-divider value first. In this case--to improve
         * accuracy--scale the parent rate by *both* the pre-divider
         * value and the divider before actually computing the
         * result of the pre-divider.
         *
         * If there's only one divider, just scale the parent rate.
         */
        if (pre_div && divider_exists(pre_div)) {
                u64 scaled_rate;

                scaled_rate = scale_rate(pre_div, parent_rate);
                scaled_rate = scale_rate(div, scaled_rate);
                scaled_div = divider_read_scaled(ccu, pre_div);
                scaled_parent_rate = do_div_round_closest(scaled_rate,
                                                        scaled_div);
        } else {
                scaled_parent_rate = scale_rate(div, parent_rate);
        }

        /*
         * Get the scaled divisor value, and divide the scaled
         * parent rate by that to determine this clock's resulting
         * rate.
         */
        scaled_div = divider_read_scaled(ccu, div);
        result = do_div_round_closest(scaled_parent_rate, scaled_div);

        return (unsigned long)result;
}
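
/*
 * Worked example (illustrative): with no pre-divider, a 200 MHz
 * parent, frac_width == 2 and a divider register field of 6 (scaled
 * divisor 10), the result is (200000000 << 2) / 10 == 80 MHz, an
 * effective divide-by-2.5.
 */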

/*
 * Compute the output rate produced when a given parent rate is fed
 * into two dividers. The pre-divider can be NULL, and even if it's
 * non-null it may be nonexistent. It's also OK for the divider to
 * be nonexistent, and in that case the pre-divider is also ignored.
 *
 * If scaled_div is non-null, it is used to return the scaled divisor
 * value used by the (downstream) divider to produce that rate.
 */
static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
                        struct bcm_clk_div *pre_div,
                        unsigned long rate, unsigned long parent_rate,
                        u64 *scaled_div)
{
        u64 scaled_parent_rate;
        u64 min_scaled_div;
        u64 max_scaled_div;
        u64 best_scaled_div;
        u64 result;

        BUG_ON(!divider_exists(div));
        BUG_ON(!rate);
        BUG_ON(parent_rate > (u64)LONG_MAX);

        /*
         * If there is a pre-divider, divide the scaled parent rate
         * by the pre-divider value first. In this case--to improve
         * accuracy--scale the parent rate by *both* the pre-divider
         * value and the divider before actually computing the
         * result of the pre-divider.
         *
         * If there's only one divider, just scale the parent rate.
         *
         * For simplicity we treat the pre-divider as fixed (for now).
         */
        if (divider_exists(pre_div)) {
                u64 scaled_rate;
                u64 scaled_pre_div;

                scaled_rate = scale_rate(pre_div, parent_rate);
                scaled_rate = scale_rate(div, scaled_rate);
                scaled_pre_div = divider_read_scaled(ccu, pre_div);
                scaled_parent_rate = do_div_round_closest(scaled_rate,
                                                        scaled_pre_div);
        } else {
                scaled_parent_rate = scale_rate(div, parent_rate);
        }

        /*
         * Compute the best possible divider and ensure it is in
         * range. A fixed divider can't be changed, so just report
         * the best we can do.
         */
        if (!divider_is_fixed(div)) {
                best_scaled_div = do_div_round_closest(scaled_parent_rate,
                                                        rate);
                min_scaled_div = scaled_div_min(div);
                max_scaled_div = scaled_div_max(div);
                if (best_scaled_div > max_scaled_div)
                        best_scaled_div = max_scaled_div;
                else if (best_scaled_div < min_scaled_div)
                        best_scaled_div = min_scaled_div;
        } else {
                best_scaled_div = divider_read_scaled(ccu, div);
        }

        /* OK, figure out the resulting rate */
        result = do_div_round_closest(scaled_parent_rate, best_scaled_div);

        if (scaled_div)
                *scaled_div = best_scaled_div;

        return (long)result;
}
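
/*
 * For example (illustrative): asking for 33 MHz from a 100 MHz parent
 * with frac_width == 2 gives scaled_parent_rate == 400000000, a best
 * scaled divisor of round(400000000 / 33000000) == 12, and a reported
 * rate of round(400000000 / 12) == 33333333 Hz.
 */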

/* Common clock parent helpers */

/*
 * For a given parent selector (register field) value, find the
 * index into a selector's parent_sel array that contains it.
 * Returns the index, or BAD_CLK_INDEX if it's not found.
 */
static u8 parent_index(struct bcm_clk_sel *sel, u8 parent_sel)
{
        u8 i;

        BUG_ON(sel->parent_count > (u32)U8_MAX);
        for (i = 0; i < sel->parent_count; i++)
                if (sel->parent_sel[i] == parent_sel)
                        return i;
        return BAD_CLK_INDEX;
}

/*
 * Fetch the current value of the selector, and translate that into
 * its corresponding index in the parent array we registered with
 * the clock framework.
 *
 * Returns parent array index that corresponds with the value found,
 * or BAD_CLK_INDEX if the found value is out of range.
 */
static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel)
{
        unsigned long flags;
        u32 reg_val;
        u32 parent_sel;
        u8 index;

        /* If there's no selector, there's only one parent */
        if (!selector_exists(sel))
                return 0;

        /* Get the value in the selector register */
        flags = ccu_lock(ccu);
        reg_val = __ccu_read(ccu, sel->offset);
        ccu_unlock(ccu, flags);

        parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);

        /* Look up that selector's parent array index and return it */
        index = parent_index(sel, parent_sel);
        if (index == BAD_CLK_INDEX)
                pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n",
                        __func__, parent_sel, ccu->name, sel->offset);

        return index;
}

/*
 * Commit our desired selector value to the hardware.
 *
 * Returns 0 on success. Returns -EINVAL for invalid arguments.
 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
 */
static int
__sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
                struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
{
        u32 parent_sel;
        u32 reg_val;
        bool enabled;
        int ret = 0;

        BUG_ON(!selector_exists(sel));

        /*
         * If we're just initializing the selector, and no initial
         * state was defined in the device tree, we just find out
         * what its current value is rather than updating it.
         */
        if (sel->clk_index == BAD_CLK_INDEX) {
                u8 index;

                reg_val = __ccu_read(ccu, sel->offset);
                parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
                index = parent_index(sel, parent_sel);
                if (index == BAD_CLK_INDEX)
                        return -EINVAL;
                sel->clk_index = index;

                return 0;
        }

        BUG_ON((u32)sel->clk_index >= sel->parent_count);
        parent_sel = sel->parent_sel[sel->clk_index];

        /* Clock needs to be enabled before changing the parent */
        enabled = __is_clk_gate_enabled(ccu, gate);
        if (!enabled && !__clk_gate(ccu, gate, true))
                return -ENXIO;

        /* Replace the selector value and record the result */
        reg_val = __ccu_read(ccu, sel->offset);
        reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel);
        __ccu_write(ccu, sel->offset, reg_val);

        /* If the trigger fails we still want to disable the gate */
        if (!__clk_trigger(ccu, trig))
                ret = -EIO;

        /* Disable the clock again if it was disabled to begin with */
        if (!enabled && !__clk_gate(ccu, gate, false))
                ret = ret ? ret : -ENXIO;       /* return first error */

        return ret;
}

/*
 * Initialize a selector by committing our desired state to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
                struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
{
        if (!selector_exists(sel))
                return true;
        return !__sel_commit(ccu, gate, sel, trig);
}

/*
 * Write a new value into a selector register to switch to a
 * different parent clock. Returns 0 on success, or an error code
 * (from __sel_commit()) otherwise.
 */
static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
                struct bcm_clk_sel *sel, struct bcm_clk_trig *trig,
                u8 index)
{
        unsigned long flags;
        u8 previous;
        int ret;

        previous = sel->clk_index;
        if (previous == index)
                return 0;       /* No change */

        sel->clk_index = index;

        flags = ccu_lock(ccu);
        __ccu_write_enable(ccu);

        ret = __sel_commit(ccu, gate, sel, trig);

        __ccu_write_disable(ccu);
        ccu_unlock(ccu, flags);

        if (ret)
                sel->clk_index = previous;      /* Revert the change */

        return ret;
}

/* Clock operations */

static int kona_peri_clk_enable(struct clk_hw *hw)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct bcm_clk_gate *gate = &bcm_clk->peri->gate;

        return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true);
}

static void kona_peri_clk_disable(struct clk_hw *hw)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct bcm_clk_gate *gate = &bcm_clk->peri->gate;

        (void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false);
}

static int kona_peri_clk_is_enabled(struct clk_hw *hw)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct bcm_clk_gate *gate = &bcm_clk->peri->gate;

        return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
}

static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
                        unsigned long parent_rate)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct peri_clk_data *data = bcm_clk->peri;

        return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
                                parent_rate);
}

static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
                        unsigned long *parent_rate)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct bcm_clk_div *div = &bcm_clk->peri->div;

        if (!divider_exists(div))
                return __clk_get_rate(hw->clk);

        /* Quietly avoid a zero rate */
        return round_rate(bcm_clk->ccu, div, &bcm_clk->peri->pre_div,
                                rate ? rate : 1, *parent_rate, NULL);
}

static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct peri_clk_data *data = bcm_clk->peri;
        struct bcm_clk_sel *sel = &data->sel;
        struct bcm_clk_trig *trig;
        int ret;

        BUG_ON(index >= sel->parent_count);

        /* If there's only one parent we don't require a selector */
        if (!selector_exists(sel))
                return 0;

        /*
         * The regular trigger is used by default, but if there's a
         * pre-trigger we want to use that instead.
         */
        trig = trigger_exists(&data->pre_trig) ? &data->pre_trig
                                                : &data->trig;

        ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index);
        if (ret == -ENXIO) {
                pr_err("%s: gating failure for %s\n", __func__, bcm_clk->name);
                ret = -EIO;     /* Don't proliferate weird errors */
        } else if (ret == -EIO) {
                pr_err("%s: %strigger failed for %s\n", __func__,
                        trig == &data->pre_trig ? "pre-" : "",
                        bcm_clk->name);
        }

        return ret;
}

static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct peri_clk_data *data = bcm_clk->peri;
        u8 index;

        index = selector_read_index(bcm_clk->ccu, &data->sel);

        /* Not all callers would handle an out-of-range value gracefully */
        return index == BAD_CLK_INDEX ? 0 : index;
}

static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                        unsigned long parent_rate)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct peri_clk_data *data = bcm_clk->peri;
        struct bcm_clk_div *div = &data->div;
        u64 scaled_div = 0;
        int ret;

        if (parent_rate > (unsigned long)LONG_MAX)
                return -EINVAL;

        if (rate == __clk_get_rate(hw->clk))
                return 0;

        if (!divider_exists(div))
                return rate == parent_rate ? 0 : -EINVAL;

        /*
         * A fixed divider can't be changed. (Nor can a fixed
         * pre-divider be, but for now we never actually try to
         * change that.) Tolerate a request for a no-op change.
         */
        if (divider_is_fixed(&data->div))
                return rate == parent_rate ? 0 : -EINVAL;

        /*
         * Get the scaled divisor value needed to achieve a clock
         * rate as close as possible to what was requested, given
         * the parent clock rate supplied.
         */
        (void)round_rate(bcm_clk->ccu, div, &data->pre_div,
                                rate ? rate : 1, parent_rate, &scaled_div);

        /*
         * We aren't updating any pre-divider at this point, so
         * we'll use the regular trigger.
         */
        ret = divider_write(bcm_clk->ccu, &data->gate, &data->div,
                                &data->trig, scaled_div);
        if (ret == -ENXIO) {
                pr_err("%s: gating failure for %s\n", __func__, bcm_clk->name);
                ret = -EIO;     /* Don't proliferate weird errors */
        } else if (ret == -EIO) {
                pr_err("%s: trigger failed for %s\n", __func__, bcm_clk->name);
        }

        return ret;
}

struct clk_ops kona_peri_clk_ops = {
        .enable = kona_peri_clk_enable,
        .disable = kona_peri_clk_disable,
        .is_enabled = kona_peri_clk_is_enabled,
        .recalc_rate = kona_peri_clk_recalc_rate,
        .round_rate = kona_peri_clk_round_rate,
        .set_parent = kona_peri_clk_set_parent,
        .get_parent = kona_peri_clk_get_parent,
        .set_rate = kona_peri_clk_set_rate,
};

/* Put a peripheral clock into its initial state */
static bool __peri_clk_init(struct kona_clk *bcm_clk)
{
        struct ccu_data *ccu = bcm_clk->ccu;
        struct peri_clk_data *peri = bcm_clk->peri;
        const char *name = bcm_clk->name;
        struct bcm_clk_trig *trig;

        BUG_ON(bcm_clk->type != bcm_clk_peri);

        if (!gate_init(ccu, &peri->gate)) {
                pr_err("%s: error initializing gate for %s\n", __func__, name);
                return false;
        }
        if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) {
                pr_err("%s: error initializing divider for %s\n", __func__,
                        name);
                return false;
        }

        /*
         * For the pre-divider and selector, the pre-trigger is used
         * if it's present, otherwise we just use the regular trigger.
         */
        trig = trigger_exists(&peri->pre_trig) ? &peri->pre_trig
                                                : &peri->trig;

        if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) {
                pr_err("%s: error initializing pre-divider for %s\n", __func__,
                        name);
                return false;
        }

        if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) {
                pr_err("%s: error initializing selector for %s\n", __func__,
                        name);
                return false;
        }

        return true;
}

static bool __kona_clk_init(struct kona_clk *bcm_clk)
{
        switch (bcm_clk->type) {
        case bcm_clk_peri:
                return __peri_clk_init(bcm_clk);
        default:
                BUG();
        }
        return false;   /* not reached; this function returns bool, not errno */
}

/* Set a CCU and all its clocks into their desired initial state */
bool __init kona_ccu_init(struct ccu_data *ccu)
{
        unsigned long flags;
        unsigned int which;
        struct clk **clks = ccu->data.clks;
        bool success = true;

        flags = ccu_lock(ccu);
        __ccu_write_enable(ccu);

        for (which = 0; which < ccu->data.clk_num; which++) {
                struct kona_clk *bcm_clk;

                if (!clks[which])
                        continue;
                bcm_clk = to_kona_clk(__clk_get_hw(clks[which]));
                success &= __kona_clk_init(bcm_clk);
        }

        __ccu_write_disable(ccu);
        ccu_unlock(ccu, flags);
        return success;
}