/*
 * Copyright (C) 2013 Broadcom Corporation
 * Copyright 2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include "clk-kona.h"

#include <linux/delay.h>

#define CCU_ACCESS_PASSWORD	0xA5A500
#define CLK_GATE_DELAY_LOOP	2000

/* Bitfield operations */

/* Produces a mask of set bits covering a range of a 32-bit value */
static inline u32 bitfield_mask(u32 shift, u32 width)
{
	return ((1 << width) - 1) << shift;
}

/* Extract the value of a bitfield found within a given register value */
static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width)
{
	return (reg_val & bitfield_mask(shift, width)) >> shift;
}

/* Replace the value of a bitfield found within a given register value */
static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val)
{
	u32 mask = bitfield_mask(shift, width);

	return (reg_val & ~mask) | (val << shift);
}
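
/*
 * For example (illustrative values only), for a 3-bit field at bit
 * position 4:
 *
 *	bitfield_mask(4, 3)                     == 0x00000070
 *	bitfield_extract(0xffffffaf, 4, 3)      == 0x2
 *	bitfield_replace(0xffffffff, 4, 3, 0x2) == 0xffffffaf
 *
 * Note that bitfield_replace() assumes the caller supplies a value
 * that fits in the field; bits of "val" beyond "width" are not masked.
 */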

/* Divider and scaling helpers */

/*
 * Implement DIV_ROUND_CLOSEST() for 64-bit dividend and both values
 * unsigned.  Note that unlike do_div(), the remainder is discarded
 * and the return value is the quotient (not the remainder).
 */
u64 do_div_round_closest(u64 dividend, unsigned long divisor)
{
	u64 result;

	result = dividend + ((u64)divisor >> 1);
	(void)do_div(result, divisor);

	return result;
}
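
/*
 * Adding half the divisor before dividing implements round-to-nearest;
 * e.g. do_div_round_closest(10, 4) computes (10 + 2) / 4 == 3, where
 * plain truncating division would yield 2.
 */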

/* Convert a divider into the scaled divisor value it represents. */
static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
{
	return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
}
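
/*
 * A scaled divisor is the real (possibly fractional) divisor shifted
 * left by the divider's fractional field width; the register stores
 * that value biased downward by 1.0 in the scaled representation.
 * For example, with frac_width == 3 a register value of 4 yields a
 * scaled divisor of 4 + (1 << 3) == 12, i.e. a real divisor of
 * 12 / 8 == 1.5.
 */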

/*
 * Build a scaled divider value as close as possible to the
 * given whole part (div_value) and fractional part (expressed
 * in billionths).
 */
u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
{
	u64 combined;

	BUG_ON(!div_value);
	BUG_ON(billionths >= BILLION);

	combined = (u64)div_value * BILLION + billionths;
	combined <<= div->u.s.frac_width;

	return do_div_round_closest(combined, BILLION);
}
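
/*
 * For example, requesting a divisor of 1.5 from a divider with
 * frac_width == 3 means scaled_div_build(div, 1, 500000000):
 * combined == 1500000000, shifted left by 3 to 12000000000, then
 * divided by one billion (rounding to nearest) to yield 12 -- the
 * same scaled divisor as in the example above.
 */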

/* The scaled minimum divisor representable by a divider */
static inline u64
scaled_div_min(struct bcm_clk_div *div)
{
	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	return scaled_div_value(div, 0);
}

/* The scaled maximum divisor representable by a divider */
u64 scaled_div_max(struct bcm_clk_div *div)
{
	u32 reg_div;

	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	reg_div = ((u32)1 << div->u.s.width) - 1;

	return scaled_div_value(div, reg_div);
}

/*
 * Convert a scaled divisor into its divider representation as
 * stored in a divider register field.
 */
static inline u32
divider(struct bcm_clk_div *div, u64 scaled_div)
{
	BUG_ON(scaled_div < scaled_div_min(div));
	BUG_ON(scaled_div > scaled_div_max(div));

	return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width));
}
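
/*
 * divider() is the inverse of scaled_div_value(): continuing the
 * example, a scaled divisor of 12 with frac_width == 3 maps back to
 * the register value 12 - (1 << 3) == 4.
 */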

/* Return a rate scaled for use when dividing by a scaled divisor. */
static inline u64
scale_rate(struct bcm_clk_div *div, u32 rate)
{
	if (divider_is_fixed(div))
		return (u64)rate;

	return (u64)rate << div->u.s.frac_width;
}

/* CCU access */

/* Read a 32-bit register value from a CCU's address space. */
static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset)
{
	return readl(ccu->base + reg_offset);
}

/* Write a 32-bit register value into a CCU's address space. */
static inline void
__ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val)
{
	writel(reg_val, ccu->base + reg_offset);
}

static inline unsigned long ccu_lock(struct ccu_data *ccu)
{
	unsigned long flags;

	spin_lock_irqsave(&ccu->lock, flags);

	return flags;
}

static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags)
{
	spin_unlock_irqrestore(&ccu->lock, flags);
}

/*
 * Enable/disable write access to CCU protected registers.  The
 * WR_ACCESS register for all CCUs is at offset 0.
 */
static inline void __ccu_write_enable(struct ccu_data *ccu)
{
	if (ccu->write_enabled) {
		pr_err("%s: access already enabled for %s\n", __func__,
			ccu->name);
		return;
	}
	ccu->write_enabled = true;
	__ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1);
}

static inline void __ccu_write_disable(struct ccu_data *ccu)
{
	if (!ccu->write_enabled) {
		pr_err("%s: access wasn't enabled for %s\n", __func__,
			ccu->name);
		return;
	}

	__ccu_write(ccu, 0, CCU_ACCESS_PASSWORD);
	ccu->write_enabled = false;
}
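
/*
 * In other words, enabling write access writes 0xA5A501 (the password
 * with the low "enable" bit set) to the WR_ACCESS register, and
 * disabling writes 0xA5A500 (the password with that bit clear).
 */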

/*
 * Poll a register in a CCU's address space, returning when the
 * specified bit in that register's value is set (or clear).  Delay
 * a microsecond after each read of the register.  Returns true if
 * successful, or false if we gave up trying.
 *
 * Caller must ensure the CCU lock is held.
 */
static inline bool
__ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want)
{
	unsigned int tries;
	u32 bit_mask = 1 << bit;

	for (tries = 0; tries < CLK_GATE_DELAY_LOOP; tries++) {
		u32 val;
		bool bit_val;

		val = __ccu_read(ccu, reg_offset);
		bit_val = (val & bit_mask) != 0;
		if (bit_val == want)
			return true;
		udelay(1);
	}
	return false;
}
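
/*
 * With CLK_GATE_DELAY_LOOP == 2000 and a one-microsecond delay per
 * iteration, this polls for roughly two milliseconds (plus register
 * access time) before giving up.
 */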

/* Gate operations */

/* Determine whether a clock is gated.  CCU lock must be held. */
static bool
__is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	u32 bit_mask;
	u32 reg_val;

	/* If there is no gate we can assume it's enabled. */
	if (!gate_exists(gate))
		return true;

	bit_mask = 1 << gate->status_bit;
	reg_val = __ccu_read(ccu, gate->offset);

	return (reg_val & bit_mask) != 0;
}

/* Determine whether a clock is gated. */
static bool
is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	unsigned long flags;
	bool ret;

	/* Avoid taking the lock if we can */
	if (!gate_exists(gate))
		return true;

	flags = ccu_lock(ccu);
	ret = __is_clk_gate_enabled(ccu, gate);
	ccu_unlock(ccu, flags);

	return ret;
}

/*
 * Commit our desired gate state to the hardware.
 * Returns true if successful, false otherwise.
 */
static bool
__gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	u32 reg_val;
	u32 mask;
	bool enabled = false;

	BUG_ON(!gate_exists(gate));
	if (!gate_is_sw_controllable(gate))
		return true;		/* Nothing we can change */

	reg_val = __ccu_read(ccu, gate->offset);

	/* For a hardware/software gate, set which is in control */
	if (gate_is_hw_controllable(gate)) {
		mask = (u32)1 << gate->hw_sw_sel_bit;
		if (gate_is_sw_managed(gate))
			reg_val |= mask;
		else
			reg_val &= ~mask;
	}

	/*
	 * If software is in control, enable or disable the gate.
	 * If hardware is, clear the enabled bit for good measure.
	 * If a software controlled gate can't be disabled, we're
	 * required to write a 0 into the enable bit (but the gate
	 * will be enabled).
	 */
	mask = (u32)1 << gate->en_bit;
	if (gate_is_sw_managed(gate) && (enabled = gate_is_enabled(gate)) &&
			!gate_is_no_disable(gate))
		reg_val |= mask;
	else
		reg_val &= ~mask;

	__ccu_write(ccu, gate->offset, reg_val);

	/* For a hardware controlled gate, we're done */
	if (!gate_is_sw_managed(gate))
		return true;

	/* Otherwise wait for the gate to be in desired state */
	return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled);
}
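
/*
 * A commit is therefore a single read-modify-write of the gate
 * register (updating the hardware/software select bit and the enable
 * bit together), followed by polling the status bit until the gate
 * actually reaches the requested state.
 */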

/*
 * Initialize a gate.  Our desired state (hardware/software select,
 * and if software, its enable state) is committed to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	if (!gate_exists(gate))
		return true;
	return __gate_commit(ccu, gate);
}

/*
 * Set a gate to enabled or disabled state.  Does nothing if the
 * gate is not currently under software control, or if it is already
 * in the requested state.  Returns true if successful, false
 * otherwise.  CCU lock must be held.
 */
static bool
__clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable)
{
	bool ret;

	if (!gate_exists(gate) || !gate_is_sw_managed(gate))
		return true;	/* Nothing to do */

	if (!enable && gate_is_no_disable(gate)) {
		pr_warn("%s: invalid gate disable request (ignoring)\n",
			__func__);
		return true;
	}

	if (enable == gate_is_enabled(gate))
		return true;	/* No change */

	gate_flip_enabled(gate);
	ret = __gate_commit(ccu, gate);
	if (!ret)
		gate_flip_enabled(gate);	/* Revert the change */

	return ret;
}

/* Enable or disable a gate.  Returns 0 if successful, -EIO otherwise */
static int clk_gate(struct ccu_data *ccu, const char *name,
			struct bcm_clk_gate *gate, bool enable)
{
	unsigned long flags;
	bool success;

	/*
	 * Avoid taking the lock if we can.  We quietly ignore
	 * requests to change state that don't make sense.
	 */
	if (!gate_exists(gate) || !gate_is_sw_managed(gate))
		return 0;
	if (!enable && gate_is_no_disable(gate))
		return 0;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	success = __clk_gate(ccu, gate, enable);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (success)
		return 0;

	pr_err("%s: failed to %s gate for %s\n", __func__,
		enable ? "enable" : "disable", name);

	return -EIO;
}

/* Trigger operations */

/*
 * Caller must ensure CCU lock is held and access is enabled.
 * Returns true if successful, false otherwise.
 */
static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig)
{
	/* Trigger the clock and wait for it to finish */
	__ccu_write(ccu, trig->offset, 1 << trig->bit);

	return __ccu_wait_bit(ccu, trig->offset, trig->bit, false);
}

/* Divider operations */

/* Read a divider value and return the scaled divisor it represents. */
static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
{
	unsigned long flags;
	u32 reg_val;
	u32 reg_div;

	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	flags = ccu_lock(ccu);
	reg_val = __ccu_read(ccu, div->u.s.offset);
	ccu_unlock(ccu, flags);

	/* Extract the full divider field from the register value */
	reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width);

	/* Return the scaled divisor value it represents */
	return scaled_div_value(div, reg_div);
}

/*
 * Convert a divider's scaled divisor value into its recorded form
 * and commit it into the hardware divider register.
 *
 * Returns 0 on success.  Returns -EINVAL for invalid arguments.
 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
 */
static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
	bool enabled;
	u32 reg_div;
	u32 reg_val;
	int ret = 0;

	BUG_ON(divider_is_fixed(div));

	/*
	 * If we're just initializing the divider, and no initial
	 * state was defined in the device tree, we just find out
	 * what its current value is rather than updating it.
	 */
	if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) {
		reg_val = __ccu_read(ccu, div->u.s.offset);
		reg_div = bitfield_extract(reg_val, div->u.s.shift,
						div->u.s.width);
		div->u.s.scaled_div = scaled_div_value(div, reg_div);

		return 0;
	}

	/* Convert the scaled divisor to the value we need to record */
	reg_div = divider(div, div->u.s.scaled_div);

	/* Clock needs to be enabled before changing the rate */
	enabled = __is_clk_gate_enabled(ccu, gate);
	if (!enabled && !__clk_gate(ccu, gate, true)) {
		ret = -ENXIO;
		goto out;
	}

	/* Replace the divider value and record the result */
	reg_val = __ccu_read(ccu, div->u.s.offset);
	reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width,
					reg_div);
	__ccu_write(ccu, div->u.s.offset, reg_val);

	/* If the trigger fails we still want to disable the gate */
	if (!__clk_trigger(ccu, trig))
		ret = -EIO;

	/* Disable the clock again if it was disabled to begin with */
	if (!enabled && !__clk_gate(ccu, gate, false))
		ret = ret ? ret : -ENXIO;	/* return first error */
out:
	return ret;
}

/*
 * Initialize a divider by committing our desired state to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
	if (!divider_exists(div) || divider_is_fixed(div))
		return true;
	return !__div_commit(ccu, gate, div, trig);
}

static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig,
			u64 scaled_div)
{
	unsigned long flags;
	u64 previous;
	int ret;

	BUG_ON(divider_is_fixed(div));

	previous = div->u.s.scaled_div;
	if (previous == scaled_div)
		return 0;	/* No change */

	div->u.s.scaled_div = scaled_div;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	ret = __div_commit(ccu, gate, div, trig);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (ret)
		div->u.s.scaled_div = previous;	/* Revert the change */

	return ret;
}

/* Common clock rate helpers */

/*
 * Implement the common clock framework recalc_rate method, taking
 * into account a divider and an optional pre-divider.  The
 * pre-divider register pointer may be NULL.
 */
static unsigned long clk_recalc_rate(struct ccu_data *ccu,
			struct bcm_clk_div *div, struct bcm_clk_div *pre_div,
			unsigned long parent_rate)
{
	u64 scaled_parent_rate;
	u64 scaled_div;
	u64 result;

	if (!divider_exists(div))
		return parent_rate;

	if (parent_rate > (unsigned long)LONG_MAX)
		return 0;	/* actually this would be a caller bug */

	/*
	 * If there is a pre-divider, divide the scaled parent rate
	 * by the pre-divider value first.  In this case--to improve
	 * accuracy--scale the parent rate by *both* the pre-divider
	 * value and the divider before actually computing the
	 * result of the pre-divider.
	 *
	 * If there's only one divider, just scale the parent rate.
	 */
	if (pre_div && divider_exists(pre_div)) {
		u64 scaled_rate;

		scaled_rate = scale_rate(pre_div, parent_rate);
		scaled_rate = scale_rate(div, scaled_rate);
		scaled_div = divider_read_scaled(ccu, pre_div);
		scaled_parent_rate = do_div_round_closest(scaled_rate,
							scaled_div);
	} else {
		scaled_parent_rate = scale_rate(div, parent_rate);
	}

	/*
	 * Get the scaled divisor value, and divide the scaled
	 * parent rate by that to determine this clock's resulting
	 * rate.
	 */
	scaled_div = divider_read_scaled(ccu, div);
	result = do_div_round_closest(scaled_parent_rate, scaled_div);

	return (unsigned long)result;
}
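
/*
 * For example (no pre-divider), with a 100 MHz parent, frac_width == 3
 * and a scaled divisor of 12 (a real divisor of 1.5):
 *
 *	scaled_parent_rate = 100000000 << 3 = 800000000
 *	result = 800000000 / 12 (rounded to nearest) = 66666667 Hz
 */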

/*
 * Compute the output rate produced when a given parent rate is fed
 * into two dividers.  The pre-divider can be NULL, and even if it's
 * non-null it may be nonexistent.  It's also OK for the divider to
 * be nonexistent, and in that case the pre-divider is also ignored.
 *
 * If scaled_div is non-null, it is used to return the scaled divisor
 * value used by the (downstream) divider to produce that rate.
 */
static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
			struct bcm_clk_div *pre_div,
			unsigned long rate, unsigned long parent_rate,
			u64 *scaled_div)
{
	u64 scaled_parent_rate;
	u64 min_scaled_div;
	u64 max_scaled_div;
	u64 best_scaled_div;
	u64 result;

	BUG_ON(!divider_exists(div));
	BUG_ON(!rate);
	BUG_ON(parent_rate > (u64)LONG_MAX);

	/*
	 * If there is a pre-divider, divide the scaled parent rate
	 * by the pre-divider value first.  In this case--to improve
	 * accuracy--scale the parent rate by *both* the pre-divider
	 * value and the divider before actually computing the
	 * result of the pre-divider.
	 *
	 * If there's only one divider, just scale the parent rate.
	 *
	 * For simplicity we treat the pre-divider as fixed (for now).
	 */
	if (divider_exists(pre_div)) {
		u64 scaled_rate;
		u64 scaled_pre_div;

		scaled_rate = scale_rate(pre_div, parent_rate);
		scaled_rate = scale_rate(div, scaled_rate);
		scaled_pre_div = divider_read_scaled(ccu, pre_div);
		scaled_parent_rate = do_div_round_closest(scaled_rate,
							scaled_pre_div);
	} else {
		scaled_parent_rate = scale_rate(div, parent_rate);
	}

	/*
	 * Compute the best possible divider and ensure it is in
	 * range.  A fixed divider can't be changed, so just report
	 * the best we can do.
	 */
	if (!divider_is_fixed(div)) {
		best_scaled_div = do_div_round_closest(scaled_parent_rate,
							rate);
		min_scaled_div = scaled_div_min(div);
		max_scaled_div = scaled_div_max(div);
		if (best_scaled_div > max_scaled_div)
			best_scaled_div = max_scaled_div;
		else if (best_scaled_div < min_scaled_div)
			best_scaled_div = min_scaled_div;
	} else {
		best_scaled_div = divider_read_scaled(ccu, div);
	}

	/* OK, figure out the resulting rate */
	result = do_div_round_closest(scaled_parent_rate, best_scaled_div);

	if (scaled_div)
		*scaled_div = best_scaled_div;

	return (long)result;
}
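
/*
 * Continuing the example: asking for 40 MHz from a 100 MHz parent
 * with frac_width == 3 gives best_scaled_div = 800000000 / 40000000
 * == 20 (a real divisor of 2.5), which (assuming 20 is within the
 * divider's range) reproduces exactly 800000000 / 20 == 40000000 Hz.
 */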

/* Common clock parent helpers */

/*
 * For a given parent selector (register field) value, find the
 * index into a selector's parent_sel array that contains it.
 * Returns the index, or BAD_CLK_INDEX if it's not found.
 */
static u8 parent_index(struct bcm_clk_sel *sel, u8 parent_sel)
{
	u8 i;

	BUG_ON(sel->parent_count > (u32)U8_MAX);
	for (i = 0; i < sel->parent_count; i++)
		if (sel->parent_sel[i] == parent_sel)
			return i;
	return BAD_CLK_INDEX;
}
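
/*
 * The selector field value and the parent array index need not match:
 * if, say, parent_sel[] held { 2, 5, 7 }, a selector field value of 5
 * would correspond to parent index 1, and an unlisted field value
 * such as 3 would yield BAD_CLK_INDEX.
 */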

/*
 * Fetch the current value of the selector, and translate that into
 * its corresponding index in the parent array we registered with
 * the clock framework.
 *
 * Returns parent array index that corresponds with the value found,
 * or BAD_CLK_INDEX if the found value is out of range.
 */
static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel)
{
	unsigned long flags;
	u32 reg_val;
	u32 parent_sel;
	u8 index;

	/* If there's no selector, there's only one parent */
	if (!selector_exists(sel))
		return 0;

	/* Get the value in the selector register */
	flags = ccu_lock(ccu);
	reg_val = __ccu_read(ccu, sel->offset);
	ccu_unlock(ccu, flags);

	parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);

	/* Look up that selector's parent array index and return it */
	index = parent_index(sel, parent_sel);
	if (index == BAD_CLK_INDEX)
		pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n",
			__func__, parent_sel, ccu->name, sel->offset);

	return index;
}

/*
 * Commit our desired selector value to the hardware.
 *
 * Returns 0 on success.  Returns -EINVAL for invalid arguments.
 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
 */
static int
__sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
{
	u32 parent_sel;
	u32 reg_val;
	bool enabled;
	int ret = 0;

	BUG_ON(!selector_exists(sel));

	/*
	 * If we're just initializing the selector, and no initial
	 * state was defined in the device tree, we just find out
	 * what its current value is rather than updating it.
	 */
	if (sel->clk_index == BAD_CLK_INDEX) {
		u8 index;

		reg_val = __ccu_read(ccu, sel->offset);
		parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
		index = parent_index(sel, parent_sel);
		if (index == BAD_CLK_INDEX)
			return -EINVAL;
		sel->clk_index = index;

		return 0;
	}

	BUG_ON((u32)sel->clk_index >= sel->parent_count);
	parent_sel = sel->parent_sel[sel->clk_index];

	/* Clock needs to be enabled before changing the parent */
	enabled = __is_clk_gate_enabled(ccu, gate);
	if (!enabled && !__clk_gate(ccu, gate, true))
		return -ENXIO;

	/* Replace the selector value and record the result */
	reg_val = __ccu_read(ccu, sel->offset);
	reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel);
	__ccu_write(ccu, sel->offset, reg_val);

	/* If the trigger fails we still want to disable the gate */
	if (!__clk_trigger(ccu, trig))
		ret = -EIO;

	/* Disable the clock again if it was disabled to begin with */
	if (!enabled && !__clk_gate(ccu, gate, false))
		ret = ret ? ret : -ENXIO;	/* return first error */

	return ret;
}

/*
 * Initialize a selector by committing our desired state to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
{
	if (!selector_exists(sel))
		return true;
	return !__sel_commit(ccu, gate, sel, trig);
}

/*
 * Write a new value into a selector register to switch to a
 * different parent clock.  Returns 0 on success, or an error code
 * (from __sel_commit()) otherwise.
 */
static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig,
			u8 index)
{
	unsigned long flags;
	u8 previous;
	int ret;

	previous = sel->clk_index;
	if (previous == index)
		return 0;	/* No change */

	sel->clk_index = index;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	ret = __sel_commit(ccu, gate, sel, trig);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (ret)
		sel->clk_index = previous;	/* Revert the change */

	return ret;
}

/* Clock operations */

static int kona_peri_clk_enable(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true);
}

static void kona_peri_clk_disable(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	(void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false);
}

static int kona_peri_clk_is_enabled(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
}

static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
			unsigned long parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;

	return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
				parent_rate);
}

static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_div *div = &bcm_clk->u.peri->div;

	if (!divider_exists(div))
		return __clk_get_rate(hw->clk);

	/* Quietly avoid a zero rate */
	return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
				rate ? rate : 1, *parent_rate, NULL);
}

static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	struct bcm_clk_sel *sel = &data->sel;
	struct bcm_clk_trig *trig;
	int ret;

	BUG_ON(index >= sel->parent_count);

	/* If there's only one parent we don't require a selector */
	if (!selector_exists(sel))
		return 0;

	/*
	 * The regular trigger is used by default, but if there's a
	 * pre-trigger we want to use that instead.
	 */
	trig = trigger_exists(&data->pre_trig) ? &data->pre_trig
						: &data->trig;

	ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index);
	if (ret == -ENXIO) {
		pr_err("%s: gating failure for %s\n", __func__, bcm_clk->name);
		ret = -EIO;	/* Don't proliferate weird errors */
	} else if (ret == -EIO) {
		pr_err("%s: %strigger failed for %s\n", __func__,
			trig == &data->pre_trig ? "pre-" : "",
			bcm_clk->name);
	}

	return ret;
}

static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	u8 index;

	index = selector_read_index(bcm_clk->ccu, &data->sel);

	/* Not all callers would handle an out-of-range value gracefully */
	return index == BAD_CLK_INDEX ? 0 : index;
}

static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	struct bcm_clk_div *div = &data->div;
	u64 scaled_div = 0;
	int ret;

	if (parent_rate > (unsigned long)LONG_MAX)
		return -EINVAL;

	if (rate == __clk_get_rate(hw->clk))
		return 0;

	if (!divider_exists(div))
		return rate == parent_rate ? 0 : -EINVAL;

	/*
	 * A fixed divider can't be changed.  (Nor can a fixed
	 * pre-divider be, but for now we never actually try to
	 * change that.)  Tolerate a request for a no-op change.
	 */
	if (divider_is_fixed(&data->div))
		return rate == parent_rate ? 0 : -EINVAL;

	/*
	 * Get the scaled divisor value needed to achieve a clock
	 * rate as close as possible to what was requested, given
	 * the parent clock rate supplied.
	 */
	(void)round_rate(bcm_clk->ccu, div, &data->pre_div,
				rate ? rate : 1, parent_rate, &scaled_div);

	/*
	 * We aren't updating any pre-divider at this point, so
	 * we'll use the regular trigger.
	 */
	ret = divider_write(bcm_clk->ccu, &data->gate, &data->div,
				&data->trig, scaled_div);
	if (ret == -ENXIO) {
		pr_err("%s: gating failure for %s\n", __func__, bcm_clk->name);
		ret = -EIO;	/* Don't proliferate weird errors */
	} else if (ret == -EIO) {
		pr_err("%s: trigger failed for %s\n", __func__, bcm_clk->name);
	}

	return ret;
}

struct clk_ops kona_peri_clk_ops = {
	.enable = kona_peri_clk_enable,
	.disable = kona_peri_clk_disable,
	.is_enabled = kona_peri_clk_is_enabled,
	.recalc_rate = kona_peri_clk_recalc_rate,
	.round_rate = kona_peri_clk_round_rate,
	.set_parent = kona_peri_clk_set_parent,
	.get_parent = kona_peri_clk_get_parent,
	.set_rate = kona_peri_clk_set_rate,
};
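
/*
 * These callbacks are what the common clock framework invokes for a
 * peripheral clock; e.g. a consumer's clk_set_rate() call ultimately
 * lands in kona_peri_clk_set_rate() above, and clk_set_parent() in
 * kona_peri_clk_set_parent().
 */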

/* Put a peripheral clock into its initial state */
static bool __peri_clk_init(struct kona_clk *bcm_clk)
{
	struct ccu_data *ccu = bcm_clk->ccu;
	struct peri_clk_data *peri = bcm_clk->u.peri;
	const char *name = bcm_clk->name;
	struct bcm_clk_trig *trig;

	BUG_ON(bcm_clk->type != bcm_clk_peri);

	if (!gate_init(ccu, &peri->gate)) {
		pr_err("%s: error initializing gate for %s\n", __func__, name);
		return false;
	}
	if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) {
		pr_err("%s: error initializing divider for %s\n", __func__,
			name);
		return false;
	}

	/*
	 * For the pre-divider and selector, the pre-trigger is used
	 * if it's present, otherwise we just use the regular trigger.
	 */
	trig = trigger_exists(&peri->pre_trig) ? &peri->pre_trig
						: &peri->trig;

	if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) {
		pr_err("%s: error initializing pre-divider for %s\n", __func__,
			name);
		return false;
	}

	if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) {
		pr_err("%s: error initializing selector for %s\n", __func__,
			name);
		return false;
	}

	return true;
}

static bool __kona_clk_init(struct kona_clk *bcm_clk)
{
	switch (bcm_clk->type) {
	case bcm_clk_peri:
		return __peri_clk_init(bcm_clk);
	default:
		BUG();
	}
	return false;
}

/* Set a CCU and all its clocks into their desired initial state */
bool __init kona_ccu_init(struct ccu_data *ccu)
{
	unsigned long flags;
	unsigned int which;
	struct clk **clks = ccu->data.clks;
	bool success = true;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	for (which = 0; which < ccu->data.clk_num; which++) {
		struct kona_clk *bcm_clk;

		if (!clks[which])
			continue;
		bcm_clk = to_kona_clk(__clk_get_hw(clks[which]));
		success &= __kona_clk_init(bcm_clk);
	}

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);
	return success;
}