/*
 * Copyright (C) 2013 Broadcom Corporation
 * Copyright 2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "clk-kona.h"

#include <linux/delay.h>

#define CCU_ACCESS_PASSWORD     0xA5A500
#define CLK_GATE_DELAY_LOOP     2000

/* Bitfield operations */

/* Produces a mask of set bits covering a range of a 32-bit value */
static inline u32 bitfield_mask(u32 shift, u32 width)
{
        return ((1 << width) - 1) << shift;
}

/* Extract the value of a bitfield found within a given register value */
static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width)
{
        return (reg_val & bitfield_mask(shift, width)) >> shift;
}

/* Replace the value of a bitfield found within a given register value */
static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val)
{
        u32 mask = bitfield_mask(shift, width);

        return (reg_val & ~mask) | (val << shift);
}
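
/*
 * For example, bitfield_mask(4, 3) produces 0x00000070. Given
 * reg_val 0x00000150, bitfield_extract(reg_val, 4, 3) yields 0x5,
 * and bitfield_replace(reg_val, 4, 3, 0x2) yields 0x00000120.
 * Note that bitfield_replace() does not mask "val"; callers must
 * ensure the value fits within "width" bits.
 */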

/* Divider and scaling helpers */

/*
 * Implement DIV_ROUND_CLOSEST() for 64-bit dividend and both values
 * unsigned. Note that unlike do_div(), the remainder is discarded
 * and the return value is the quotient (not the remainder).
 */
u64 do_div_round_closest(u64 dividend, unsigned long divisor)
{
        u64 result;

        result = dividend + ((u64)divisor >> 1);
        (void)do_div(result, divisor);

        return result;
}
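
/*
 * For example, do_div_round_closest(7, 2) computes (7 + 1) / 2 and
 * returns 4. The divisor must be non-zero, and (unlike do_div())
 * the dividend is passed and returned by value.
 */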

/* Convert a divider into the scaled divisor value it represents. */
static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
{
        return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
}

/*
 * Build a scaled divider value as close as possible to the
 * given whole part (div_value) and fractional part (expressed
 * in billionths).
 */
u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
{
        u64 combined;

        BUG_ON(!div_value);
        BUG_ON(billionths >= BILLION);

        combined = (u64)div_value * BILLION + billionths;
        combined <<= div->u.s.frac_width;

        return do_div_round_closest(combined, BILLION);
}
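
/*
 * Continuing the frac_width 3 example, scaled_div_build(div, 1,
 * 500000000) encodes 1.5: combined is 1,500,000,000, shifting left
 * by 3 gives 12,000,000,000, and round-dividing by one billion
 * yields the scaled divisor 12.
 */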

/* The scaled minimum divisor representable by a divider */
static inline u64
scaled_div_min(struct bcm_clk_div *div)
{
        if (divider_is_fixed(div))
                return (u64)div->u.fixed;

        return scaled_div_value(div, 0);
}

/* The scaled maximum divisor representable by a divider */
u64 scaled_div_max(struct bcm_clk_div *div)
{
        u32 reg_div;

        if (divider_is_fixed(div))
                return (u64)div->u.fixed;

        reg_div = ((u32)1 << div->u.s.width) - 1;

        return scaled_div_value(div, reg_div);
}

/*
 * Convert a scaled divisor into its divider representation as
 * stored in a divider register field.
 */
static inline u32
divider(struct bcm_clk_div *div, u64 scaled_div)
{
        BUG_ON(scaled_div < scaled_div_min(div));
        BUG_ON(scaled_div > scaled_div_max(div));

        return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width));
}
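
/*
 * divider() is the inverse of scaled_div_value(); with frac_width 3,
 * divider(div, 12) recovers the register divider value 4.
 */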

/* Return a rate scaled for use when dividing by a scaled divisor. */
static inline u64
scale_rate(struct bcm_clk_div *div, u32 rate)
{
        if (divider_is_fixed(div))
                return (u64)rate;

        return (u64)rate << div->u.s.frac_width;
}

/* CCU access */

/* Read a 32-bit register value from a CCU's address space. */
static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset)
{
        return readl(ccu->base + reg_offset);
}

/* Write a 32-bit register value into a CCU's address space. */
static inline void
__ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val)
{
        writel(reg_val, ccu->base + reg_offset);
}

static inline unsigned long ccu_lock(struct ccu_data *ccu)
{
        unsigned long flags;

        spin_lock_irqsave(&ccu->lock, flags);

        return flags;
}

static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags)
{
        spin_unlock_irqrestore(&ccu->lock, flags);
}

/*
 * Enable/disable write access to CCU protected registers. The
 * WR_ACCESS register for all CCUs is at offset 0.
 */
static inline void __ccu_write_enable(struct ccu_data *ccu)
{
        if (ccu->write_enabled) {
                pr_err("%s: access already enabled for %s\n", __func__,
                        ccu->name);
                return;
        }
        ccu->write_enabled = true;
        __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1);
}

static inline void __ccu_write_disable(struct ccu_data *ccu)
{
        if (!ccu->write_enabled) {
                pr_err("%s: access wasn't enabled for %s\n", __func__,
                        ccu->name);
                return;
        }

        __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD);
        ccu->write_enabled = false;
}
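
/*
 * Writes to protected registers follow a common pattern (see
 * clk_gate() and divider_write() below): the CCU lock is held and
 * write access enabled around the register update sequence:
 *
 *      flags = ccu_lock(ccu);
 *      __ccu_write_enable(ccu);
 *      ... __ccu_read()/__ccu_write() sequence ...
 *      __ccu_write_disable(ccu);
 *      ccu_unlock(ccu, flags);
 */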

/*
 * Poll a register in a CCU's address space, returning when the
 * specified bit in that register's value is set (or clear). Delay
 * a microsecond after each read of the register. Returns true if
 * successful, or false if we gave up trying.
 *
 * Caller must ensure the CCU lock is held.
 */
static inline bool
__ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want)
{
        unsigned int tries;
        u32 bit_mask = 1 << bit;

        for (tries = 0; tries < CLK_GATE_DELAY_LOOP; tries++) {
                u32 val;
                bool bit_val;

                val = __ccu_read(ccu, reg_offset);
                bit_val = (val & bit_mask) != 0;
                if (bit_val == want)
                        return true;
                udelay(1);
        }
        pr_warn("%s: %s/0x%04x bit %u was never %s\n", __func__,
                ccu->name, reg_offset, bit, want ? "set" : "clear");

        return false;
}
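
/*
 * With CLK_GATE_DELAY_LOOP of 2000 and a one microsecond delay per
 * iteration, __ccu_wait_bit() busy-waits for roughly 2 milliseconds
 * before giving up.
 */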

/* Gate operations */

/* Determine whether a clock is gated. CCU lock must be held. */
static bool
__is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
        u32 bit_mask;
        u32 reg_val;

        /* If there is no gate we can assume it's enabled. */
        if (!gate_exists(gate))
                return true;

        bit_mask = 1 << gate->status_bit;
        reg_val = __ccu_read(ccu, gate->offset);

        return (reg_val & bit_mask) != 0;
}

/* Determine whether a clock is gated. */
static bool
is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
        unsigned long flags;
        bool ret;

        /* Avoid taking the lock if we can */
        if (!gate_exists(gate))
                return true;

        flags = ccu_lock(ccu);
        ret = __is_clk_gate_enabled(ccu, gate);
        ccu_unlock(ccu, flags);

        return ret;
}

/*
 * Commit our desired gate state to the hardware.
 * Returns true if successful, false otherwise.
 */
static bool
__gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
        u32 reg_val;
        u32 mask;
        bool enabled = false;

        BUG_ON(!gate_exists(gate));
        if (!gate_is_sw_controllable(gate))
                return true;            /* Nothing we can change */

        reg_val = __ccu_read(ccu, gate->offset);

        /* For a hardware/software gate, set which is in control */
        if (gate_is_hw_controllable(gate)) {
                mask = (u32)1 << gate->hw_sw_sel_bit;
                if (gate_is_sw_managed(gate))
                        reg_val |= mask;
                else
                        reg_val &= ~mask;
        }

        /*
         * If software is in control, enable or disable the gate.
         * If hardware is, clear the enabled bit for good measure.
         * If a software controlled gate can't be disabled, we're
         * required to write a 0 into the enable bit (but the gate
         * will be enabled).
         */
        mask = (u32)1 << gate->en_bit;
        if (gate_is_sw_managed(gate) && (enabled = gate_is_enabled(gate)) &&
                        !gate_is_no_disable(gate))
                reg_val |= mask;
        else
                reg_val &= ~mask;

        __ccu_write(ccu, gate->offset, reg_val);

        /* For a hardware controlled gate, we're done */
        if (!gate_is_sw_managed(gate))
                return true;

        /* Otherwise wait for the gate to be in desired state */
        return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled);
}

/*
 * Initialize a gate. Our desired state (hardware/software select,
 * and if software, its enable state) is committed to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
        if (!gate_exists(gate))
                return true;
        return __gate_commit(ccu, gate);
}

/*
 * Set a gate to enabled or disabled state. Does nothing if the
 * gate is not currently under software control, or if it is already
 * in the requested state. Returns true if successful, false
 * otherwise. CCU lock must be held.
 */
static bool
__clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable)
{
        bool ret;

        if (!gate_exists(gate) || !gate_is_sw_managed(gate))
                return true;    /* Nothing to do */

        if (!enable && gate_is_no_disable(gate)) {
                pr_warn("%s: invalid gate disable request (ignoring)\n",
                        __func__);
                return true;
        }

        if (enable == gate_is_enabled(gate))
                return true;    /* No change */

        gate_flip_enabled(gate);
        ret = __gate_commit(ccu, gate);
        if (!ret)
                gate_flip_enabled(gate);        /* Revert the change */

        return ret;
}

/* Enable or disable a gate. Returns 0 if successful, -EIO otherwise */
static int clk_gate(struct ccu_data *ccu, const char *name,
                        struct bcm_clk_gate *gate, bool enable)
{
        unsigned long flags;
        bool success;

        /*
         * Avoid taking the lock if we can. We quietly ignore
         * requests to change state that don't make sense.
         */
        if (!gate_exists(gate) || !gate_is_sw_managed(gate))
                return 0;
        if (!enable && gate_is_no_disable(gate))
                return 0;

        flags = ccu_lock(ccu);
        __ccu_write_enable(ccu);

        success = __clk_gate(ccu, gate, enable);

        __ccu_write_disable(ccu);
        ccu_unlock(ccu, flags);

        if (success)
                return 0;

        pr_err("%s: failed to %s gate for %s\n", __func__,
                enable ? "enable" : "disable", name);

        return -EIO;
}

/* Trigger operations */

/*
 * Caller must ensure CCU lock is held and access is enabled.
 * Returns true if successful, false otherwise.
 */
static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig)
{
        /* Trigger the clock and wait for it to finish */
        __ccu_write(ccu, trig->offset, 1 << trig->bit);

        return __ccu_wait_bit(ccu, trig->offset, trig->bit, false);
}

/* Divider operations */

/* Read a divider value and return the scaled divisor it represents. */
static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
{
        unsigned long flags;
        u32 reg_val;
        u32 reg_div;

        if (divider_is_fixed(div))
                return (u64)div->u.fixed;

        flags = ccu_lock(ccu);
        reg_val = __ccu_read(ccu, div->u.s.offset);
        ccu_unlock(ccu, flags);

        /* Extract the full divider field from the register value */
        reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width);

        /* Return the scaled divisor value it represents */
        return scaled_div_value(div, reg_div);
}

/*
 * Convert a divider's scaled divisor value into its recorded form
 * and commit it into the hardware divider register.
 *
 * Returns 0 on success. Returns -EINVAL for invalid arguments.
 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
 */
static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
                        struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
        bool enabled;
        u32 reg_div;
        u32 reg_val;
        int ret = 0;

        BUG_ON(divider_is_fixed(div));

        /*
         * If we're just initializing the divider, and no initial
         * state was defined in the device tree, we just find out
         * what its current value is rather than updating it.
         */
        if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) {
                reg_val = __ccu_read(ccu, div->u.s.offset);
                reg_div = bitfield_extract(reg_val, div->u.s.shift,
                                                div->u.s.width);
                div->u.s.scaled_div = scaled_div_value(div, reg_div);

                return 0;
        }

        /* Convert the scaled divisor to the value we need to record */
        reg_div = divider(div, div->u.s.scaled_div);

        /* Clock needs to be enabled before changing the rate */
        enabled = __is_clk_gate_enabled(ccu, gate);
        if (!enabled && !__clk_gate(ccu, gate, true)) {
                ret = -ENXIO;
                goto out;
        }

        /* Replace the divider value and record the result */
        reg_val = __ccu_read(ccu, div->u.s.offset);
        reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width,
                                        reg_div);
        __ccu_write(ccu, div->u.s.offset, reg_val);

        /* If the trigger fails we still want to disable the gate */
        if (!__clk_trigger(ccu, trig))
                ret = -EIO;

        /* Disable the clock again if it was disabled to begin with */
        if (!enabled && !__clk_gate(ccu, gate, false))
                ret = ret ? ret : -ENXIO;       /* return first error */
out:
        return ret;
}

/*
 * Initialize a divider by committing our desired state to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
                        struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
        if (!divider_exists(div) || divider_is_fixed(div))
                return true;
        return !__div_commit(ccu, gate, div, trig);
}

static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
                        struct bcm_clk_div *div, struct bcm_clk_trig *trig,
                        u64 scaled_div)
{
        unsigned long flags;
        u64 previous;
        int ret;

        BUG_ON(divider_is_fixed(div));

        previous = div->u.s.scaled_div;
        if (previous == scaled_div)
                return 0;       /* No change */

        div->u.s.scaled_div = scaled_div;

        flags = ccu_lock(ccu);
        __ccu_write_enable(ccu);

        ret = __div_commit(ccu, gate, div, trig);

        __ccu_write_disable(ccu);
        ccu_unlock(ccu, flags);

        if (ret)
                div->u.s.scaled_div = previous;         /* Revert the change */

        return ret;
}

/* Common clock rate helpers */

/*
 * Implement the common clock framework recalc_rate method, taking
 * into account a divider and an optional pre-divider. The
 * pre-divider register pointer may be NULL.
 */
static unsigned long clk_recalc_rate(struct ccu_data *ccu,
                        struct bcm_clk_div *div, struct bcm_clk_div *pre_div,
                        unsigned long parent_rate)
{
        u64 scaled_parent_rate;
        u64 scaled_div;
        u64 result;

        if (!divider_exists(div))
                return parent_rate;

        if (parent_rate > (unsigned long)LONG_MAX)
                return 0;       /* actually this would be a caller bug */

        /*
         * If there is a pre-divider, divide the scaled parent rate
         * by the pre-divider value first. In this case--to improve
         * accuracy--scale the parent rate by *both* the pre-divider
         * value and the divider before actually computing the
         * result of the pre-divider.
         *
         * If there's only one divider, just scale the parent rate.
         */
        if (pre_div && divider_exists(pre_div)) {
                u64 scaled_rate;

                scaled_rate = scale_rate(pre_div, parent_rate);
                scaled_rate = scale_rate(div, scaled_rate);
                scaled_div = divider_read_scaled(ccu, pre_div);
                scaled_parent_rate = do_div_round_closest(scaled_rate,
                                                        scaled_div);
        } else {
                scaled_parent_rate = scale_rate(div, parent_rate);
        }

        /*
         * Get the scaled divisor value, and divide the scaled
         * parent rate by that to determine this clock's resulting
         * rate.
         */
        scaled_div = divider_read_scaled(ccu, div);
        result = do_div_round_closest(scaled_parent_rate, scaled_div);

        return (unsigned long)result;
}
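
/*
 * Worked example: for a 400 MHz parent, a divider with frac_width 3
 * and scaled divisor 12 (an effective divisor of 1.5), and no
 * pre-divider, the scaled parent rate is 400000000 << 3, and
 * do_div_round_closest(3200000000, 12) yields 266666667 Hz.
 */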

/*
 * Compute the output rate produced when a given parent rate is fed
 * into two dividers. The pre-divider can be NULL, and even if it's
 * non-null it may be nonexistent. It's also OK for the divider to
 * be nonexistent, and in that case the pre-divider is also ignored.
 *
 * If scaled_div is non-null, it is used to return the scaled divisor
 * value used by the (downstream) divider to produce that rate.
 */
static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
                        struct bcm_clk_div *pre_div,
                        unsigned long rate, unsigned long parent_rate,
                        u64 *scaled_div)
{
        u64 scaled_parent_rate;
        u64 min_scaled_div;
        u64 max_scaled_div;
        u64 best_scaled_div;
        u64 result;

        BUG_ON(!divider_exists(div));
        BUG_ON(!rate);
        BUG_ON(parent_rate > (u64)LONG_MAX);

        /*
         * If there is a pre-divider, divide the scaled parent rate
         * by the pre-divider value first. In this case--to improve
         * accuracy--scale the parent rate by *both* the pre-divider
         * value and the divider before actually computing the
         * result of the pre-divider.
         *
         * If there's only one divider, just scale the parent rate.
         *
         * For simplicity we treat the pre-divider as fixed (for now).
         */
        if (divider_exists(pre_div)) {
                u64 scaled_rate;
                u64 scaled_pre_div;

                scaled_rate = scale_rate(pre_div, parent_rate);
                scaled_rate = scale_rate(div, scaled_rate);
                scaled_pre_div = divider_read_scaled(ccu, pre_div);
                scaled_parent_rate = do_div_round_closest(scaled_rate,
                                                        scaled_pre_div);
        } else {
                scaled_parent_rate = scale_rate(div, parent_rate);
        }

        /*
         * Compute the best possible divider and ensure it is in
         * range. A fixed divider can't be changed, so just report
         * the best we can do.
         */
        if (!divider_is_fixed(div)) {
                best_scaled_div = do_div_round_closest(scaled_parent_rate,
                                                        rate);
                min_scaled_div = scaled_div_min(div);
                max_scaled_div = scaled_div_max(div);
                if (best_scaled_div > max_scaled_div)
                        best_scaled_div = max_scaled_div;
                else if (best_scaled_div < min_scaled_div)
                        best_scaled_div = min_scaled_div;
        } else {
                best_scaled_div = divider_read_scaled(ccu, div);
        }

        /* OK, figure out the resulting rate */
        result = do_div_round_closest(scaled_parent_rate, best_scaled_div);

        if (scaled_div)
                *scaled_div = best_scaled_div;

        return (long)result;
}
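
/*
 * Worked example: requesting 100 MHz from the same 400 MHz parent
 * (frac_width 3, no pre-divider) gives best_scaled_div =
 * 3200000000 / 100000000 = 32 (register divider value 24), so the
 * requested rate is met exactly, provided 32 falls between the
 * divider's scaled minimum and maximum.
 */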

/* Common clock parent helpers */

/*
 * For a given parent selector (register field) value, find the
 * index into a selector's parent_sel array that contains it.
 * Returns the index, or BAD_CLK_INDEX if it's not found.
 */
static u8 parent_index(struct bcm_clk_sel *sel, u8 parent_sel)
{
        u8 i;

        BUG_ON(sel->parent_count > (u32)U8_MAX);
        for (i = 0; i < sel->parent_count; i++)
                if (sel->parent_sel[i] == parent_sel)
                        return i;
        return BAD_CLK_INDEX;
}
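
/*
 * For example, if a selector's parent_sel array is {0, 2, 3}, then
 * parent_index(sel, 2) returns 1, while parent_index(sel, 1) returns
 * BAD_CLK_INDEX because no parent uses that selector value.
 */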

/*
 * Fetch the current value of the selector, and translate that into
 * its corresponding index in the parent array we registered with
 * the clock framework.
 *
 * Returns parent array index that corresponds with the value found,
 * or BAD_CLK_INDEX if the found value is out of range.
 */
static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel)
{
        unsigned long flags;
        u32 reg_val;
        u32 parent_sel;
        u8 index;

        /* If there's no selector, there's only one parent */
        if (!selector_exists(sel))
                return 0;

        /* Get the value in the selector register */
        flags = ccu_lock(ccu);
        reg_val = __ccu_read(ccu, sel->offset);
        ccu_unlock(ccu, flags);

        parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);

        /* Look up that selector's parent array index and return it */
        index = parent_index(sel, parent_sel);
        if (index == BAD_CLK_INDEX)
                pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n",
                        __func__, parent_sel, ccu->name, sel->offset);

        return index;
}

/*
 * Commit our desired selector value to the hardware.
 *
 * Returns 0 on success. Returns -EINVAL for invalid arguments.
 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
 */
static int
__sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
                        struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
{
        u32 parent_sel;
        u32 reg_val;
        bool enabled;
        int ret = 0;

        BUG_ON(!selector_exists(sel));

        /*
         * If we're just initializing the selector, and no initial
         * state was defined in the device tree, we just find out
         * what its current value is rather than updating it.
         */
        if (sel->clk_index == BAD_CLK_INDEX) {
                u8 index;

                reg_val = __ccu_read(ccu, sel->offset);
                parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
                index = parent_index(sel, parent_sel);
                if (index == BAD_CLK_INDEX)
                        return -EINVAL;
                sel->clk_index = index;

                return 0;
        }

        BUG_ON((u32)sel->clk_index >= sel->parent_count);
        parent_sel = sel->parent_sel[sel->clk_index];

        /* Clock needs to be enabled before changing the parent */
        enabled = __is_clk_gate_enabled(ccu, gate);
        if (!enabled && !__clk_gate(ccu, gate, true))
                return -ENXIO;

        /* Replace the selector value and record the result */
        reg_val = __ccu_read(ccu, sel->offset);
        reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel);
        __ccu_write(ccu, sel->offset, reg_val);

        /* If the trigger fails we still want to disable the gate */
        if (!__clk_trigger(ccu, trig))
                ret = -EIO;

        /* Disable the clock again if it was disabled to begin with */
        if (!enabled && !__clk_gate(ccu, gate, false))
                ret = ret ? ret : -ENXIO;       /* return first error */

        return ret;
}

/*
 * Initialize a selector by committing our desired state to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
                        struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
{
        if (!selector_exists(sel))
                return true;
        return !__sel_commit(ccu, gate, sel, trig);
}

/*
 * Write a new value into a selector register to switch to a
 * different parent clock. Returns 0 on success, or an error code
 * (from __sel_commit()) otherwise.
 */
static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
                        struct bcm_clk_sel *sel, struct bcm_clk_trig *trig,
                        u8 index)
{
        unsigned long flags;
        u8 previous;
        int ret;

        previous = sel->clk_index;
        if (previous == index)
                return 0;       /* No change */

        sel->clk_index = index;

        flags = ccu_lock(ccu);
        __ccu_write_enable(ccu);

        ret = __sel_commit(ccu, gate, sel, trig);

        __ccu_write_disable(ccu);
        ccu_unlock(ccu, flags);

        if (ret)
                sel->clk_index = previous;      /* Revert the change */

        return ret;
}

/* Clock operations */

static int kona_peri_clk_enable(struct clk_hw *hw)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

        return clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, true);
}

static void kona_peri_clk_disable(struct clk_hw *hw)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

        (void)clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, false);
}

static int kona_peri_clk_is_enabled(struct clk_hw *hw)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

        return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
}

static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
                        unsigned long parent_rate)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct peri_clk_data *data = bcm_clk->u.peri;

        return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
                                parent_rate);
}

static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
                        unsigned long *parent_rate)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct bcm_clk_div *div = &bcm_clk->u.peri->div;

        if (!divider_exists(div))
                return __clk_get_rate(hw->clk);

        /* Quietly avoid a zero rate */
        return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
                                rate ? rate : 1, *parent_rate, NULL);
}

static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct peri_clk_data *data = bcm_clk->u.peri;
        struct bcm_clk_sel *sel = &data->sel;
        struct bcm_clk_trig *trig;
        int ret;

        BUG_ON(index >= sel->parent_count);

        /* If there's only one parent we don't require a selector */
        if (!selector_exists(sel))
                return 0;

        /*
         * The regular trigger is used by default, but if there's a
         * pre-trigger we want to use that instead.
         */
        trig = trigger_exists(&data->pre_trig) ? &data->pre_trig
                                               : &data->trig;

        ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index);
        if (ret == -ENXIO) {
                pr_err("%s: gating failure for %s\n", __func__,
                        bcm_clk->init_data.name);
                ret = -EIO;     /* Don't proliferate weird errors */
        } else if (ret == -EIO) {
                pr_err("%s: %strigger failed for %s\n", __func__,
                        trig == &data->pre_trig ? "pre-" : "",
                        bcm_clk->init_data.name);
        }

        return ret;
}

static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct peri_clk_data *data = bcm_clk->u.peri;
        u8 index;

        index = selector_read_index(bcm_clk->ccu, &data->sel);

        /* Not all callers would handle an out-of-range value gracefully */
        return index == BAD_CLK_INDEX ? 0 : index;
}

static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                        unsigned long parent_rate)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct peri_clk_data *data = bcm_clk->u.peri;
        struct bcm_clk_div *div = &data->div;
        u64 scaled_div = 0;
        int ret;

        if (parent_rate > (unsigned long)LONG_MAX)
                return -EINVAL;

        if (rate == __clk_get_rate(hw->clk))
                return 0;

        if (!divider_exists(div))
                return rate == parent_rate ? 0 : -EINVAL;

        /*
         * A fixed divider can't be changed. (Nor can a fixed
         * pre-divider be, but for now we never actually try to
         * change that.) Tolerate a request for a no-op change.
         */
        if (divider_is_fixed(&data->div))
                return rate == parent_rate ? 0 : -EINVAL;

        /*
         * Get the scaled divisor value needed to achieve a clock
         * rate as close as possible to what was requested, given
         * the parent clock rate supplied.
         */
        (void)round_rate(bcm_clk->ccu, div, &data->pre_div,
                                rate ? rate : 1, parent_rate, &scaled_div);

        /*
         * We aren't updating any pre-divider at this point, so
         * we'll use the regular trigger.
         */
        ret = divider_write(bcm_clk->ccu, &data->gate, &data->div,
                                &data->trig, scaled_div);
        if (ret == -ENXIO) {
                pr_err("%s: gating failure for %s\n", __func__,
                        bcm_clk->init_data.name);
                ret = -EIO;     /* Don't proliferate weird errors */
        } else if (ret == -EIO) {
                pr_err("%s: trigger failed for %s\n", __func__,
                        bcm_clk->init_data.name);
        }

        return ret;
}

struct clk_ops kona_peri_clk_ops = {
        .enable = kona_peri_clk_enable,
        .disable = kona_peri_clk_disable,
        .is_enabled = kona_peri_clk_is_enabled,
        .recalc_rate = kona_peri_clk_recalc_rate,
        .round_rate = kona_peri_clk_round_rate,
        .set_parent = kona_peri_clk_set_parent,
        .get_parent = kona_peri_clk_get_parent,
        .set_rate = kona_peri_clk_set_rate,
};

/* Put a peripheral clock into its initial state */
static bool __peri_clk_init(struct kona_clk *bcm_clk)
{
        struct ccu_data *ccu = bcm_clk->ccu;
        struct peri_clk_data *peri = bcm_clk->u.peri;
        const char *name = bcm_clk->init_data.name;
        struct bcm_clk_trig *trig;

        BUG_ON(bcm_clk->type != bcm_clk_peri);

        if (!gate_init(ccu, &peri->gate)) {
                pr_err("%s: error initializing gate for %s\n", __func__, name);
                return false;
        }
        if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) {
                pr_err("%s: error initializing divider for %s\n", __func__,
                        name);
                return false;
        }

        /*
         * For the pre-divider and selector, the pre-trigger is used
         * if it's present, otherwise we just use the regular trigger.
         */
        trig = trigger_exists(&peri->pre_trig) ? &peri->pre_trig
                                               : &peri->trig;

        if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) {
                pr_err("%s: error initializing pre-divider for %s\n", __func__,
                        name);
                return false;
        }

        if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) {
                pr_err("%s: error initializing selector for %s\n", __func__,
                        name);
                return false;
        }

        return true;
}

static bool __kona_clk_init(struct kona_clk *bcm_clk)
{
        switch (bcm_clk->type) {
        case bcm_clk_peri:
                return __peri_clk_init(bcm_clk);
        default:
                BUG();
        }
        return false;   /* unreachable; BUG() does not return */
}

/* Set a CCU and all its clocks into their desired initial state */
bool __init kona_ccu_init(struct ccu_data *ccu)
{
        unsigned long flags;
        unsigned int which;
        struct clk **clks = ccu->clk_data.clks;
        bool success = true;

        flags = ccu_lock(ccu);
        __ccu_write_enable(ccu);

        for (which = 0; which < ccu->clk_data.clk_num; which++) {
                struct kona_clk *bcm_clk;

                if (!clks[which])
                        continue;
                bcm_clk = to_kona_clk(__clk_get_hw(clks[which]));
                success &= __kona_clk_init(bcm_clk);
        }

        __ccu_write_disable(ccu);
        ccu_unlock(ccu, flags);
        return success;
}