/*
 * OMAP3/4 - specific DPLL control functions
 *
 * Copyright (C) 2009-2010 Texas Instruments, Inc.
 * Copyright (C) 2009-2010 Nokia Corporation
 *
 * Written by Paul Walmsley
 * Testing and integration fixes by Jouni Högander
 *
 * 36xx support added by Vishwanath BS, Richard Woodruff, and Nishanth
 * Menon
 *
 * Parts of this code are based on code written by
 * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/clkdev.h>

#include "soc.h"
#include "clockdomain.h"
#include "clock.h"
#include "cm2xxx_3xxx.h"
#include "cm-regbits-34xx.h"

/* CM_AUTOIDLE_PLL*.AUTO_* bit values */
#define DPLL_AUTOIDLE_DISABLE           0x0
#define DPLL_AUTOIDLE_LOW_POWER_STOP    0x1

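/* With udelay(1) per poll, this bounds the DPLL state wait at roughly 1 s */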
#define MAX_DPLL_WAIT_TRIES             1000000

/* Private functions */

/* _omap3_dpll_write_clken - write clken_bits arg to a DPLL's enable bits */
static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits)
{
        const struct dpll_data *dd;
        u32 v;

        dd = clk->dpll_data;

        v = __raw_readl(dd->control_reg);
        v &= ~dd->enable_mask;
        v |= clken_bits << __ffs(dd->enable_mask);
        __raw_writel(v, dd->control_reg);
}

/* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */
static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state)
{
        const struct dpll_data *dd;
        int i = 0;
        int ret = -EINVAL;
        const char *clk_name;

        dd = clk->dpll_data;
        clk_name = __clk_get_name(clk->hw.clk);

        state <<= __ffs(dd->idlest_mask);

        while (((__raw_readl(dd->idlest_reg) & dd->idlest_mask) != state) &&
               i < MAX_DPLL_WAIT_TRIES) {
                i++;
                udelay(1);
        }

        if (i == MAX_DPLL_WAIT_TRIES) {
                printk(KERN_ERR "clock: %s failed transition to '%s'\n",
                       clk_name, (state) ? "locked" : "bypassed");
        } else {
                pr_debug("clock: %s transition to '%s' in %d loops\n",
                         clk_name, (state) ? "locked" : "bypassed", i);

                ret = 0;
        }

        return ret;
}

/* From 3430 TRM ES2 4.7.6.2 */
static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n)
{
        unsigned long fint;
        u16 f = 0;

        fint = __clk_get_rate(clk->dpll_data->clk_ref) / n;

        pr_debug("clock: fint is %lu\n", fint);

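        /*
         * Note: no FREQSEL value covers fint between 2.1 MHz and 7.5 MHz;
         * in that gap f stays 0 and the caller's WARN_ON(!freqsel) fires.
         */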
        if (fint >= 750000 && fint <= 1000000)
                f = 0x3;
        else if (fint > 1000000 && fint <= 1250000)
                f = 0x4;
        else if (fint > 1250000 && fint <= 1500000)
                f = 0x5;
        else if (fint > 1500000 && fint <= 1750000)
                f = 0x6;
        else if (fint > 1750000 && fint <= 2100000)
                f = 0x7;
        else if (fint > 7500000 && fint <= 10000000)
                f = 0xB;
        else if (fint > 10000000 && fint <= 12500000)
                f = 0xC;
        else if (fint > 12500000 && fint <= 15000000)
                f = 0xD;
        else if (fint > 15000000 && fint <= 17500000)
                f = 0xE;
        else if (fint > 17500000 && fint <= 21000000)
                f = 0xF;
        else
                pr_debug("clock: unknown freqsel setting for %d\n", n);

        return f;
}

/*
 * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to lock.  Waits for the DPLL to report
 * readiness before returning.  Will save and restore the DPLL's
 * autoidle state across the enable, per the CDP code.  If the DPLL
 * locked successfully, return 0; if the DPLL did not lock in the time
 * allotted, or DPLL3 was passed in, return -EINVAL.
 */
static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk)
{
        const struct dpll_data *dd;
        u8 ai;
        u8 state = 1;
        int r = 0;

        pr_debug("clock: locking DPLL %s\n", __clk_get_name(clk->hw.clk));

        dd = clk->dpll_data;
        state <<= __ffs(dd->idlest_mask);

        /* Check if already locked */
        if ((__raw_readl(dd->idlest_reg) & dd->idlest_mask) == state)
                goto done;

        ai = omap3_dpll_autoidle_read(clk);

        if (ai)
                omap3_dpll_deny_idle(clk);

        _omap3_dpll_write_clken(clk, DPLL_LOCKED);

        r = _omap3_wait_dpll_status(clk, 1);

        if (ai)
                omap3_dpll_allow_idle(clk);

done:
        return r;
}

/*
 * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enter low-power bypass mode.  In
 * bypass mode, the DPLL's rate is set equal to its parent clock's
 * rate.  Waits for the DPLL to report readiness before returning.
 * Will save and restore the DPLL's autoidle state across the enable,
 * per the CDP code.  If the DPLL entered bypass mode successfully,
 * return 0; if the DPLL did not enter bypass in the time allotted, or
 * DPLL3 was passed in, or the DPLL does not support low-power bypass,
 * return -EINVAL.
 */
static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk)
{
        int r;
        u8 ai;

        if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)))
                return -EINVAL;

        pr_debug("clock: configuring DPLL %s for low-power bypass\n",
                 __clk_get_name(clk->hw.clk));

        ai = omap3_dpll_autoidle_read(clk);

        _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS);

        r = _omap3_wait_dpll_status(clk, 0);

        if (ai)
                omap3_dpll_allow_idle(clk);

        return r;
}

/*
 * _omap3_noncore_dpll_stop - instruct a DPLL to stop
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enter low-power stop.  Will save and
 * restore the DPLL's autoidle state across the stop, per the CDP
 * code.  If DPLL3 was passed in, or the DPLL does not support
 * low-power stop, return -EINVAL; otherwise, return 0.
 */
static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk)
{
        u8 ai;

        if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
                return -EINVAL;

        pr_debug("clock: stopping DPLL %s\n", __clk_get_name(clk->hw.clk));

        ai = omap3_dpll_autoidle_read(clk);

        _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP);

        if (ai)
                omap3_dpll_allow_idle(clk);

        return 0;
}

/**
 * _lookup_dco - Lookup DCO used by j-type DPLL
 * @clk: pointer to a DPLL struct clk
 * @dco: digital control oscillator selector
 * @m: DPLL multiplier to set
 * @n: DPLL divider to set
 *
 * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
 *
 * XXX This code is not needed for 3430/AM35xx; can it be optimized
 * out in non-multi-OMAP builds for those chips?
 */
static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n)
{
        unsigned long fint, clkinp; /* watch out for overflow */

        clkinp = __clk_get_rate(__clk_get_parent(clk->hw.clk));
        fint = (clkinp / n) * m;

        if (fint < 1000000000)
                *dco = 2;
        else
                *dco = 4;
}

/**
 * _lookup_sddiv - Calculate sigma delta divider for j-type DPLL
 * @clk: pointer to a DPLL struct clk
 * @sd_div: target sigma-delta divider
 * @m: DPLL multiplier to set
 * @n: DPLL divider to set
 *
 * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
 *
 * XXX This code is not needed for 3430/AM35xx; can it be optimized
 * out in non-multi-OMAP builds for those chips?
 */
static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
{
        unsigned long clkinp, sd; /* watch out for overflow */
        int mod1, mod2;

        clkinp = __clk_get_rate(__clk_get_parent(clk->hw.clk));

        /*
         * target sigma-delta to near 250MHz
         * sd = ceil[(m/(n+1)) * (clkinp_MHz / 250)]
         */
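        /*
         * The arithmetic below works in units of 100 kHz (clkinp / 100000),
         * i.e. ten times the MHz value, so one decimal digit survives the
         * integer division; mod1 and mod2 catch any remainder so the final
         * divide-by-10 rounds up, implementing the ceiling above.
         */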
        clkinp /= 100000;       /* shift from MHz to 10*Hz for 38.4 and 19.2 */
        mod1 = (clkinp * m) % (250 * n);
        sd = (clkinp * m) / (250 * n);
        mod2 = sd % 10;
        sd /= 10;

        if (mod1 || mod2)
                sd++;
        *sd_div = sd;
}

/*
 * omap3_noncore_dpll_program - set non-core DPLL M,N values directly
 * @clk:	struct clk * of DPLL to set
 * @freqsel:	FREQSEL value to set
 *
 * Program the DPLL with the last M, N values calculated, and wait for
 * the DPLL to lock.  Returns -EINVAL upon error, or 0 upon success.
 */
static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
{
        struct dpll_data *dd = clk->dpll_data;
        u8 dco, sd_div;
        u32 v;

        /* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */
        _omap3_noncore_dpll_bypass(clk);

        /*
         * Set jitter correction.  No jitter correction for AM33xx, OMAP4
         * and 3630 since the freqsel field is no longer present.
         */
        if (!soc_is_am33xx() && !cpu_is_omap44xx() && !cpu_is_omap3630()) {
                v = __raw_readl(dd->control_reg);
                v &= ~dd->freqsel_mask;
                v |= freqsel << __ffs(dd->freqsel_mask);
                __raw_writel(v, dd->control_reg);
        }

        /* Set DPLL multiplier, divider */
        v = __raw_readl(dd->mult_div1_reg);
        v &= ~(dd->mult_mask | dd->div1_mask);
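        /*
         * The multiplier field takes M directly; the divider field is
         * programmed as N - 1 (the hardware divides by the field value
         * plus one).
         */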
        v |= dd->last_rounded_m << __ffs(dd->mult_mask);
        v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);

        /* Configure dco and sd_div for dplls that have these fields */
        if (dd->dco_mask) {
                _lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
                v &= ~(dd->dco_mask);
                v |= dco << __ffs(dd->dco_mask);
        }
        if (dd->sddiv_mask) {
                _lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
                              dd->last_rounded_n);
                v &= ~(dd->sddiv_mask);
                v |= sd_div << __ffs(dd->sddiv_mask);
        }

        __raw_writel(v, dd->mult_div1_reg);

        /* Set 4X multiplier and low-power mode */
        if (dd->m4xen_mask || dd->lpmode_mask) {
                v = __raw_readl(dd->control_reg);

                if (dd->m4xen_mask) {
                        if (dd->last_rounded_m4xen)
                                v |= dd->m4xen_mask;
                        else
                                v &= ~dd->m4xen_mask;
                }

                if (dd->lpmode_mask) {
                        if (dd->last_rounded_lpmode)
                                v |= dd->lpmode_mask;
                        else
                                v &= ~dd->lpmode_mask;
                }

                __raw_writel(v, dd->control_reg);
        }

        /* We let the clock framework set the other output dividers later */

        /* REVISIT: Set ramp-up delay? */

        _omap3_noncore_dpll_lock(clk);

        return 0;
}

/* Public functions */

/**
 * omap3_dpll_recalc - recalculate DPLL rate
 * @clk: DPLL struct clk
 *
 * Recalculate and propagate the DPLL rate.
 */
unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate)
{
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);

        return omap2_get_dpll_rate(clk);
}

/* Non-CORE DPLL (e.g., DPLLs that do not control SDRC) clock functions */

/**
 * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
 * The choice of modes depends on the DPLL's programmed rate: if it is
 * the same as the DPLL's parent clock, it will enter bypass;
 * otherwise, it will enter lock.  This code will wait for the DPLL to
 * indicate readiness before returning, unless the DPLL takes too long
 * to enter the target state.  Intended to be used as the struct clk's
 * enable function.  If DPLL3 was passed in, or the DPLL does not
 * support low-power stop, or if the DPLL took too long to enter
 * bypass or lock, return -EINVAL; otherwise, return 0.
 */
int omap3_noncore_dpll_enable(struct clk_hw *hw)
{
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);
        int r;
        struct dpll_data *dd;
        struct clk *parent;

        dd = clk->dpll_data;
        if (!dd)
                return -EINVAL;

        if (clk->clkdm) {
                r = clkdm_clk_enable(clk->clkdm, hw->clk);
                if (r) {
                        WARN(1,
                             "%s: could not enable %s's clockdomain %s: %d\n",
                             __func__, __clk_get_name(hw->clk),
                             clk->clkdm->name, r);
                        return r;
                }
        }

        parent = __clk_get_parent(hw->clk);

        if (__clk_get_rate(hw->clk) == __clk_get_rate(dd->clk_bypass)) {
                WARN_ON(parent != dd->clk_bypass);
                r = _omap3_noncore_dpll_bypass(clk);
        } else {
                WARN_ON(parent != dd->clk_ref);
                r = _omap3_noncore_dpll_lock(clk);
        }

        return r;
}

/**
 * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enter low-power stop.  This function is
 * intended for use in struct clkops.  No return value.
 */
void omap3_noncore_dpll_disable(struct clk_hw *hw)
{
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);

        _omap3_noncore_dpll_stop(clk);
        if (clk->clkdm)
                clkdm_clk_disable(clk->clkdm, hw->clk);
}


/* Non-CORE DPLL rate set code */

/**
 * omap3_noncore_dpll_set_rate - set non-core DPLL rate
 * @clk: struct clk * of DPLL to set
 * @rate: rounded target rate
 *
 * Set the DPLL CLKOUT to the target rate.  If the DPLL can enter
 * low-power bypass, and the target rate is the bypass source clock
 * rate, then configure the DPLL for bypass.  Otherwise, round the
 * target rate if it hasn't been done already, then program and lock
 * the DPLL.  Returns -EINVAL upon error, or 0 upon success.
 */
int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
                                unsigned long parent_rate)
{
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);
        struct clk *new_parent = NULL;
        u16 freqsel = 0;
        struct dpll_data *dd;
        int ret;

        if (!hw || !rate)
                return -EINVAL;

        dd = clk->dpll_data;
        if (!dd)
                return -EINVAL;

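        /*
         * Keep both possible parents (bypass and ref) clocked while the
         * DPLL is reprogrammed; they are released again at the end of
         * this function.
         */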
        __clk_prepare(dd->clk_bypass);
        clk_enable(dd->clk_bypass);
        __clk_prepare(dd->clk_ref);
        clk_enable(dd->clk_ref);

        if (__clk_get_rate(dd->clk_bypass) == rate &&
            (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
                pr_debug("%s: %s: set rate: entering bypass.\n",
                         __func__, __clk_get_name(hw->clk));

                ret = _omap3_noncore_dpll_bypass(clk);
                if (!ret)
                        new_parent = dd->clk_bypass;
        } else {
                if (dd->last_rounded_rate != rate)
                        rate = __clk_round_rate(hw->clk, rate);

                if (dd->last_rounded_rate == 0)
                        return -EINVAL;

                /* No freqsel on AM335x, OMAP4 and OMAP3630 */
                if (!soc_is_am33xx() && !cpu_is_omap44xx() &&
                    !cpu_is_omap3630()) {
                        freqsel = _omap3_dpll_compute_freqsel(clk,
                                                dd->last_rounded_n);
                        WARN_ON(!freqsel);
                }

                pr_debug("%s: %s: set rate: locking rate to %lu.\n",
                         __func__, __clk_get_name(hw->clk), rate);

                ret = omap3_noncore_dpll_program(clk, freqsel);
                if (!ret)
                        new_parent = dd->clk_ref;
        }
        /*
         * FIXME - this is all wrong. common code handles reparenting and
         * migrating prepare/enable counts. dplls should be a multiplexer
         * clock and this should be a set_parent operation so that all of that
         * stuff is inherited for free
         */

        if (!ret)
                __clk_reparent(hw->clk, new_parent);

        clk_disable(dd->clk_ref);
        __clk_unprepare(dd->clk_ref);
        clk_disable(dd->clk_bypass);
        __clk_unprepare(dd->clk_bypass);

        return 0;
}

/* DPLL autoidle read/set code */

/**
 * omap3_dpll_autoidle_read - read a DPLL's autoidle bits
 * @clk: struct clk * of the DPLL to read
 *
 * Return the DPLL's autoidle bits, shifted down to bit 0.  Returns
 * -EINVAL if passed a null pointer or if the struct clk does not
 * appear to refer to a DPLL.
 */
u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk)
{
        const struct dpll_data *dd;
        u32 v;

        if (!clk || !clk->dpll_data)
                return -EINVAL;

        dd = clk->dpll_data;

        if (!dd->autoidle_reg)
                return -EINVAL;

        v = __raw_readl(dd->autoidle_reg);
        v &= dd->autoidle_mask;
        v >>= __ffs(dd->autoidle_mask);

        return v;
}

/**
 * omap3_dpll_allow_idle - enable DPLL autoidle bits
 * @clk: struct clk * of the DPLL to operate on
 *
 * Enable DPLL automatic idle control.  This automatic idle mode
 * switching takes effect only when the DPLL is locked, at least on
 * OMAP3430.  The DPLL will enter low-power stop when its downstream
 * clocks are gated.  No return value.
 */
void omap3_dpll_allow_idle(struct clk_hw_omap *clk)
{
        const struct dpll_data *dd;
        u32 v;

        if (!clk || !clk->dpll_data)
                return;

        dd = clk->dpll_data;

        if (!dd->autoidle_reg)
                return;

        /*
         * REVISIT: CORE DPLL can optionally enter low-power bypass
         * by writing 0x5 instead of 0x1.  Add some mechanism to
         * optionally enter this mode.
         */
        v = __raw_readl(dd->autoidle_reg);
        v &= ~dd->autoidle_mask;
        v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
        __raw_writel(v, dd->autoidle_reg);
}

/**
 * omap3_dpll_deny_idle - prevent DPLL from automatically idling
 * @clk: struct clk * of the DPLL to operate on
 *
 * Disable DPLL automatic idle control.  No return value.
 */
void omap3_dpll_deny_idle(struct clk_hw_omap *clk)
{
        const struct dpll_data *dd;
        u32 v;

        if (!clk || !clk->dpll_data)
                return;

        dd = clk->dpll_data;

        if (!dd->autoidle_reg)
                return;

        v = __raw_readl(dd->autoidle_reg);
        v &= ~dd->autoidle_mask;
        v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);
        __raw_writel(v, dd->autoidle_reg);
}

/* Clock control for DPLL outputs */

/**
 * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
 * @clk: DPLL output struct clk
 *
 * Using parent clock DPLL data, look up DPLL state.  If locked, set our
 * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
 */
unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
                                    unsigned long parent_rate)
{
        const struct dpll_data *dd;
        unsigned long rate;
        u32 v;
        struct clk_hw_omap *pclk = NULL;
        struct clk *parent;

        /* Walk up the parents of clk, looking for a DPLL */
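        /*
         * The inner loop skips CLK_IS_BASIC parents, which are not
         * struct clk_hw_omap and therefore cannot carry dpll_data.
         */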
        do {
                do {
                        parent = __clk_get_parent(hw->clk);
                        hw = __clk_get_hw(parent);
                } while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC));
                if (!hw)
                        break;
                pclk = to_clk_hw_omap(hw);
        } while (pclk && !pclk->dpll_data);

        /* clk does not have a DPLL as a parent?  error in the clock data */
        if (!pclk) {
                WARN_ON(1);
                return 0;
        }

        dd = pclk->dpll_data;

        WARN_ON(!dd->enable_mask);

        v = __raw_readl(dd->control_reg) & dd->enable_mask;
        v >>= __ffs(dd->enable_mask);
        if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE))
                rate = parent_rate;
        else
                rate = parent_rate * 2;
        return rate;
}

/* OMAP3/4 non-CORE DPLL clkops */
const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
        .allow_idle     = omap3_dpll_allow_idle,
        .deny_idle      = omap3_dpll_deny_idle,
};