/*
 * OMAP2/3/4 DPLL clock functions
 *
 * Copyright (C) 2005-2008 Texas Instruments, Inc.
 * Copyright (C) 2004-2010 Nokia Corporation
 *
 * Contacts:
 * Richard Woodruff <r-woodruff2@ti.com>
 * Paul Walmsley
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/clk-provider.h>
#include <linux/io.h>

#include <asm/div64.h>

#include "soc.h"
#include "clock.h"
#include "cm-regbits-24xx.h"
#include "cm-regbits-34xx.h"

/* DPLL rate rounding: minimum DPLL multiplier, divider values */
#define DPLL_MIN_MULTIPLIER		2
#define DPLL_MIN_DIVIDER		1

/* Possible error results from _dpll_test_mult */
#define DPLL_MULT_UNDERFLOW		-1

/*
 * Scale factor to mitigate roundoff errors in DPLL rate rounding.
 * The higher the scale factor, the greater the risk of arithmetic overflow,
 * but the closer the rounded rate to the target rate.  DPLL_SCALE_FACTOR
 * must be a power of DPLL_SCALE_BASE.
 */
#define DPLL_SCALE_FACTOR		64
#define DPLL_SCALE_BASE			2
#define DPLL_ROUNDING_VAL		((DPLL_SCALE_BASE / 2) * \
					 (DPLL_SCALE_FACTOR / DPLL_SCALE_BASE))
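/*
 * With the values above, DPLL_ROUNDING_VAL evaluates to
 * (2 / 2) * (64 / 2) = 32, i.e. half of DPLL_SCALE_FACTOR, so
 * _dpll_test_mult() rounds a prescaled multiplier to the nearest
 * integer when unscaling it.
 */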

/* DPLL valid Fint frequency band limits - from 34xx TRM Section 4.7.6.2 */
#define OMAP3430_DPLL_FINT_BAND1_MIN	750000
#define OMAP3430_DPLL_FINT_BAND1_MAX	2100000
#define OMAP3430_DPLL_FINT_BAND2_MIN	7500000
#define OMAP3430_DPLL_FINT_BAND2_MAX	21000000
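/*
 * Note that Fint values falling between the two bands (above
 * OMAP3430_DPLL_FINT_BAND1_MAX but below OMAP3430_DPLL_FINT_BAND2_MIN)
 * are rejected on OMAP3430 - see the final test in _dpll_test_fint().
 */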

/*
 * DPLL valid Fint frequency range for OMAP36xx and OMAP4xxx.
 * From device data manual section 4.3 "DPLL and DLL Specifications".
 */
#define OMAP3PLUS_DPLL_FINT_JTYPE_MIN	500000
#define OMAP3PLUS_DPLL_FINT_JTYPE_MAX	2500000
#define OMAP3PLUS_DPLL_FINT_MIN		32000
#define OMAP3PLUS_DPLL_FINT_MAX		52000000

/* _dpll_test_fint() return codes */
#define DPLL_FINT_UNDERFLOW		-1
#define DPLL_FINT_INVALID		-2

/* Private functions */

/*
 * _dpll_test_fint - test whether an Fint value is valid for the DPLL
 * @clk: DPLL struct clk to test
 * @n: divider value (N) to test
 *
 * Tests whether a particular divider @n will result in a valid DPLL
 * internal clock frequency Fint. See the 34xx TRM 4.7.6.2 "DPLL Jitter
 * Correction". Returns 0 if OK, -1 if the enclosing loop can terminate
 * (assuming that it is counting N upwards), or -2 if the enclosing loop
 * should skip to the next iteration (again assuming N is increasing).
 */
static int _dpll_test_fint(struct clk_hw_omap *clk, u8 n)
{
	struct dpll_data *dd;
	long fint, fint_min, fint_max;
	int ret = 0;

	dd = clk->dpll_data;

	/* DPLL divider must result in a valid jitter correction val */
	fint = __clk_get_rate(__clk_get_parent(clk->hw.clk)) / n;

	if (cpu_is_omap24xx()) {
		/* Should not be called for OMAP2, so warn if it is called */
		WARN(1, "No fint limits available for OMAP2!\n");
		return DPLL_FINT_INVALID;
	} else if (cpu_is_omap3430()) {
		fint_min = OMAP3430_DPLL_FINT_BAND1_MIN;
		fint_max = OMAP3430_DPLL_FINT_BAND2_MAX;
	} else if (dd->flags & DPLL_J_TYPE) {
		fint_min = OMAP3PLUS_DPLL_FINT_JTYPE_MIN;
		fint_max = OMAP3PLUS_DPLL_FINT_JTYPE_MAX;
	} else {
		fint_min = OMAP3PLUS_DPLL_FINT_MIN;
		fint_max = OMAP3PLUS_DPLL_FINT_MAX;
	}

	if (fint < fint_min) {
		pr_debug("rejecting n=%d due to Fint failure, lowering max_divider\n",
			 n);
		dd->max_divider = n;
		ret = DPLL_FINT_UNDERFLOW;
	} else if (fint > fint_max) {
		pr_debug("rejecting n=%d due to Fint failure, boosting min_divider\n",
			 n);
		dd->min_divider = n;
		ret = DPLL_FINT_INVALID;
	} else if (cpu_is_omap3430() && fint > OMAP3430_DPLL_FINT_BAND1_MAX &&
		   fint < OMAP3430_DPLL_FINT_BAND2_MIN) {
		pr_debug("rejecting n=%d due to Fint failure\n", n);
		ret = DPLL_FINT_INVALID;
	}

	return ret;
}

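/*
 * Computes parent_rate * m / n using a 64-bit intermediate so the
 * multiplication cannot overflow a 32-bit unsigned long.  Illustrative
 * numbers only: a 19,200,000 Hz parent with m = 125 and n = 3 gives
 * 19,200,000 * 125 / 3 = 800,000,000 Hz.
 */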
static unsigned long _dpll_compute_new_rate(unsigned long parent_rate,
					    unsigned int m, unsigned int n)
{
	unsigned long long num;

	num = (unsigned long long)parent_rate * m;
	do_div(num, n);
	return num;
}

/*
 * _dpll_test_mult - test a DPLL multiplier value
 * @m: pointer to the DPLL m (multiplier) value under test
 * @n: current DPLL n (divider) value under test
 * @new_rate: pointer to storage for the resulting rounded rate
 * @target_rate: the desired DPLL rate
 * @parent_rate: the DPLL's parent clock rate
 *
 * This code tests a DPLL multiplier value, ensuring that the
 * resulting rate will not be higher than the target_rate, and that
 * the multiplier value itself is valid for the DPLL.  Initially, the
 * integer pointed to by the m argument should be prescaled by
 * multiplying by DPLL_SCALE_FACTOR.  The code will replace this with
 * a non-scaled m upon return.  This non-scaled m will result in a
 * new_rate as close as possible to target_rate (but not greater than
 * target_rate) given the current (parent_rate, n, prescaled m)
 * triple.  Returns DPLL_MULT_UNDERFLOW in the event that the
 * non-scaled m attempted to underflow, which can allow the calling
 * function to bail out early; or 0 upon success.
 */
static int _dpll_test_mult(int *m, int n, unsigned long *new_rate,
			   unsigned long target_rate,
			   unsigned long parent_rate)
{
	int r = 0, carry = 0;

	/* Unscale m and round if necessary */
	if (*m % DPLL_SCALE_FACTOR >= DPLL_ROUNDING_VAL)
		carry = 1;
	*m = (*m / DPLL_SCALE_FACTOR) + carry;
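	/*
	 * For example (illustrative numbers only): a prescaled m of 8035
	 * has 8035 % 64 = 35 >= DPLL_ROUNDING_VAL (32), so the carry
	 * rounds the unscaled multiplier up to 8035 / 64 + 1 = 126.
	 */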

	/*
	 * The new rate must be <= the target rate to avoid programming
	 * a rate that is impossible for the hardware to handle
	 */
	*new_rate = _dpll_compute_new_rate(parent_rate, *m, n);
	if (*new_rate > target_rate) {
		(*m)--;
		*new_rate = 0;
	}

	/* Guard against m underflow */
	if (*m < DPLL_MIN_MULTIPLIER) {
		*m = DPLL_MIN_MULTIPLIER;
		*new_rate = 0;
		r = DPLL_MULT_UNDERFLOW;
	}

	if (*new_rate == 0)
		*new_rate = _dpll_compute_new_rate(parent_rate, *m, n);

	return r;
}

/* Public functions */

u8 omap2_init_dpll_parent(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 v;
	struct dpll_data *dd;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	v = __raw_readl(dd->control_reg);
	v &= dd->enable_mask;
	v >>= __ffs(dd->enable_mask);

	/* Reparent the struct clk in case the dpll is in bypass */
	if (cpu_is_omap24xx()) {
		if (v == OMAP2XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP2XXX_EN_DPLL_FRBYPASS)
			return 1;
	} else if (cpu_is_omap34xx()) {
		if (v == OMAP3XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP3XXX_EN_DPLL_FRBYPASS)
			return 1;
	} else if (soc_is_am33xx() || cpu_is_omap44xx()) {
		if (v == OMAP4XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP4XXX_EN_DPLL_FRBYPASS ||
		    v == OMAP4XXX_EN_DPLL_MNBYPASS)
			return 1;
	}
	return 0;
}

/**
 * omap2_get_dpll_rate - returns the current DPLL CLKOUT rate
 * @clk: struct clk * of a DPLL
 *
 * DPLLs can be locked or bypassed - basically, enabled or disabled.
 * When locked, the DPLL output depends on the M and N values.  When
 * bypassed, on OMAP2xxx, the output rate is either the 32KiHz clock
 * or sys_clk.  Bypass rates on OMAP3 depend on the DPLL: DPLLs 1 and
 * 2 are bypassed with dpll1_fclk and dpll2_fclk respectively
 * (generated by DPLL3), while DPLL 3, 4, and 5 bypass rates are sys_clk.
 * Returns the current DPLL CLKOUT rate (*not* CLKOUTX2) if the DPLL is
 * locked, or the appropriate bypass rate if the DPLL is bypassed, or 0
 * if the clock @clk is not a DPLL.
 */
unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
{
	long long dpll_clk;
	u32 dpll_mult, dpll_div, v;
	struct dpll_data *dd;

	dd = clk->dpll_data;
	if (!dd)
		return 0;

	/* Return bypass rate if DPLL is bypassed */
	v = __raw_readl(dd->control_reg);
	v &= dd->enable_mask;
	v >>= __ffs(dd->enable_mask);

	if (cpu_is_omap24xx()) {
		if (v == OMAP2XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP2XXX_EN_DPLL_FRBYPASS)
			return __clk_get_rate(dd->clk_bypass);
	} else if (cpu_is_omap34xx()) {
		if (v == OMAP3XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP3XXX_EN_DPLL_FRBYPASS)
			return __clk_get_rate(dd->clk_bypass);
	} else if (soc_is_am33xx() || cpu_is_omap44xx()) {
		if (v == OMAP4XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP4XXX_EN_DPLL_FRBYPASS ||
		    v == OMAP4XXX_EN_DPLL_MNBYPASS)
			return __clk_get_rate(dd->clk_bypass);
	}

	v = __raw_readl(dd->mult_div1_reg);
	dpll_mult = v & dd->mult_mask;
	dpll_mult >>= __ffs(dd->mult_mask);
	dpll_div = v & dd->div1_mask;
	dpll_div >>= __ffs(dd->div1_mask);

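	/*
	 * CLKOUT = clk_ref * M / (N + 1): the N register field holds the
	 * divider minus one.  Illustrative numbers only: a 26 MHz clk_ref
	 * with M = 250 and an N field of 12 gives
	 * 26,000,000 * 250 / 13 = 500,000,000 Hz.
	 */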
	dpll_clk = (long long) __clk_get_rate(dd->clk_ref) * dpll_mult;
	do_div(dpll_clk, dpll_div + 1);

	return dpll_clk;
}

/* DPLL rate rounding code */

/**
 * omap2_dpll_round_rate - round a target rate for an OMAP DPLL
 * @clk: struct clk * for a DPLL
 * @target_rate: desired DPLL clock rate
 *
 * Given a DPLL and a desired target rate, round the target rate to a
 * possible, programmable rate for this DPLL.  Attempts to select the
 * minimum possible n.  Stores the computed (m, n) in the DPLL's
 * dpll_data structure so set_rate() will not need to call this
 * (expensive) function again.  Returns ~0 if the target rate cannot
 * be rounded, or the rounded rate upon success.
 */
long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
			   unsigned long *parent_rate)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int m, n, r, scaled_max_m;
	unsigned long scaled_rt_rp;
	unsigned long new_rate = 0;
	struct dpll_data *dd;
	unsigned long ref_rate;
	const char *clk_name;

	if (!clk || !clk->dpll_data)
		return ~0;

	dd = clk->dpll_data;

	ref_rate = __clk_get_rate(dd->clk_ref);
	clk_name = __clk_get_name(hw->clk);
	pr_debug("clock: %s: starting DPLL round_rate, target rate %ld\n",
		 clk_name, target_rate);

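	/*
	 * Prescale the target/reference ratio by DPLL_SCALE_FACTOR so the
	 * integer division below retains ~6 extra bits of precision.
	 * Illustrative numbers only: ref_rate = 19,200,000 Hz and
	 * target_rate = 800,000,000 Hz give scaled_rt_rp =
	 * 800,000,000 / (19,200,000 / 64) = 2666.
	 */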
	scaled_rt_rp = target_rate / (ref_rate / DPLL_SCALE_FACTOR);
	scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR;

	dd->last_rounded_rate = 0;

	for (n = dd->min_divider; n <= dd->max_divider; n++) {

		/* Is the (input clk, divider) pair valid for the DPLL? */
		r = _dpll_test_fint(clk, n);
		if (r == DPLL_FINT_UNDERFLOW)
			break;
		else if (r == DPLL_FINT_INVALID)
			continue;

		/* Compute the scaled DPLL multiplier, based on the divider */
		m = scaled_rt_rp * n;

		/*
		 * Since we're counting n up, an m overflow means we
		 * can bail out completely (m scales with n, so it can
		 * only grow larger on subsequent iterations and will
		 * never drop back below scaled_max_m)
		 */
		if (m > scaled_max_m)
			break;

		r = _dpll_test_mult(&m, n, &new_rate, target_rate,
				    ref_rate);

		/* m can't be set low enough for this n - try with a larger n */
		if (r == DPLL_MULT_UNDERFLOW)
			continue;

		pr_debug("clock: %s: m = %d: n = %d: new_rate = %ld\n",
			 clk_name, m, n, new_rate);

		if (target_rate == new_rate) {
			dd->last_rounded_m = m;
			dd->last_rounded_n = n;
			dd->last_rounded_rate = target_rate;
			break;
		}
	}

	if (target_rate != new_rate) {
		pr_debug("clock: %s: cannot round to rate %ld\n",
			 clk_name, target_rate);
		return ~0;
	}

	return target_rate;
}