/*
 * Clock manipulation routines for Freescale STMP37XX/STMP378X
 *
 * Author: Vitaly Wool <vital@embeddedalley.com>
 *
 * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
 */

/*
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/io.h>

#include <asm/mach-types.h>
#include <asm/clkdev.h>
#include <mach/regs-clkctrl.h>

#include "clock.h"

static DEFINE_SPINLOCK(clocks_lock);

static struct clk osc_24M;
static struct clk pll_clk;
static struct clk cpu_clk;
static struct clk hclk;

static int propagate_rate(struct clk *);

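/*
 * Returns non-zero while the divider selected by busy_reg/busy_bit is
 * still transferring a newly written value to the clock domain.
 */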
static inline int clk_is_busy(struct clk *clk)
{
        return __raw_readl(clk->busy_reg) & (1 << clk->busy_bit);
}

static inline int clk_good(struct clk *clk)
{
        return clk && !IS_ERR(clk) && clk->ops;
}

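/*
 * Generic gate control: enable_shift selects the bit in enable_reg,
 * and enable_negate means the bit is a clock gate (set == disabled).
 * enable_wait is an optional settling delay in microseconds.
 */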
static int std_clk_enable(struct clk *clk)
{
        if (clk->enable_reg) {
                u32 clk_reg = __raw_readl(clk->enable_reg);
                if (clk->enable_negate)
                        clk_reg &= ~(1 << clk->enable_shift);
                else
                        clk_reg |= (1 << clk->enable_shift);
                __raw_writel(clk_reg, clk->enable_reg);
                if (clk->enable_wait)
                        udelay(clk->enable_wait);
                return 0;
        } else
                return -EINVAL;
}

static int std_clk_disable(struct clk *clk)
{
        if (clk->enable_reg) {
                u32 clk_reg = __raw_readl(clk->enable_reg);
                if (clk->enable_negate)
                        clk_reg |= (1 << clk->enable_shift);
                else
                        clk_reg &= ~(1 << clk->enable_shift);
                __raw_writel(clk_reg, clk->enable_reg);
                return 0;
        } else
                return -EINVAL;
}

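/*
 * IO clock: runs at parent * 18 divided by the 5-bit fractional divider
 * field in HW_CLKCTRL_FRAC (valid values 18..35).
 */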
static int io_set_rate(struct clk *clk, u32 rate)
{
        u32 reg_frac, clkctrl_frac;
        int i, ret = 0, mask = 0x1f;

        clkctrl_frac = (clk->parent->rate * 18 + rate - 1) / rate;

        if (clkctrl_frac < 18 || clkctrl_frac > 35) {
                ret = -EINVAL;
                goto out;
        }

        reg_frac = __raw_readl(clk->scale_reg);
        reg_frac &= ~(mask << clk->scale_shift);
        __raw_writel(reg_frac | (clkctrl_frac << clk->scale_shift),
                        clk->scale_reg);
        if (clk->busy_reg) {
                for (i = 10000; i; i--)
                        if (!clk_is_busy(clk))
                                break;
                if (!i)
                        ret = -ETIMEDOUT;
                else
                        ret = 0;
        }
out:
        return ret;
}

static long io_get_rate(struct clk *clk)
{
        long rate = clk->parent->rate * 18;
        int mask = 0x1f;

        rate /= (__raw_readl(clk->scale_reg) >> clk->scale_shift) & mask;
        clk->rate = rate;

        return rate;
}

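/*
 * Simple peripheral divider clocks (SSP, GPMI, SAIF): rate is the
 * parent rate divided by the 8-bit integer divider in scale_reg.
 */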
static long per_get_rate(struct clk *clk)
{
        long rate = clk->parent->rate;
        long div;
        const int mask = 0xff;

        if (clk->enable_reg) {
                u32 gate = __raw_readl(clk->enable_reg) &
                                (1 << clk->enable_shift);

                /* A gated clock reports a rate of zero */
                if (clk->enable_negate ? gate : !gate) {
                        clk->rate = 0;
                        return clk->rate;
                }
        }

        div = (__raw_readl(clk->scale_reg) >> clk->scale_shift) & mask;
        if (div)
                rate /= div;
        clk->rate = rate;

        return clk->rate;
}

static int per_set_rate(struct clk *clk, u32 rate)
{
        int ret = -EINVAL;
        int div = (clk->parent->rate + rate - 1) / rate;
        u32 reg_frac;
        const int mask = 0xff;
        int try = 10;
        int i = -1;

        if (div == 0 || div > mask)
                goto out;

        reg_frac = __raw_readl(clk->scale_reg);
        reg_frac &= ~(mask << clk->scale_shift);

        while (try--) {
                __raw_writel(reg_frac | (div << clk->scale_shift),
                                clk->scale_reg);

                if (clk->busy_reg) {
                        for (i = 10000; i; i--)
                                if (!clk_is_busy(clk))
                                        break;
                }
                if (i)
                        break;
        }

        if (!i)
                ret = -ETIMEDOUT;
        else
                ret = 0;

out:
        if (ret != 0)
                printk(KERN_ERR "%s: error %d\n", __func__, ret);
        return ret;
}

static long lcdif_get_rate(struct clk *clk)
{
        long rate = clk->parent->rate;
        long div;
        const int mask = 0xff;

        div = (__raw_readl(clk->scale_reg) >> clk->scale_shift) & mask;
        if (div) {
                rate /= div;
                div = (HW_CLKCTRL_FRAC_RD() & BM_CLKCTRL_FRAC_PIXFRAC) >>
                        BP_CLKCTRL_FRAC_PIXFRAC;
                rate /= div;
        }
        clk->rate = rate;

        return rate;
}

static int lcdif_set_rate(struct clk *clk, u32 rate)
{
        int ret = 0;
        /*
         * On 3700, we can get most timings exact by modifying ref_pix
         * and the divider, but keeping the phase timings at 1 (2
         * phases per cycle).
         *
         * ref_pix can be between 480e6*18/35=246.9MHz and 480e6*18/18=480MHz,
         * which is between 18/(18*480e6)=2.084ns and 35/(18*480e6)=4.050ns.
         *
         * ns_cycle >= 2*18e3/(18*480) = 25/6
         * ns_cycle <= 2*35e3/(18*480) = 875/108
         *
         * Multiply the ns_cycle by 'div' to lengthen it until it fits the
         * bounds. This is the divider we'll use after ref_pix.
         *
         * 6 * ns_cycle >= 25 * div
         * 108 * ns_cycle <= 875 * div
         */
        u32 ns_cycle = 1000000 / rate;
        u32 div, reg_val;
        u32 lowest_result = (u32) -1;
        u32 lowest_div = 0, lowest_fracdiv = 0;

        for (div = 1; div < 256; ++div) {
                u32 fracdiv;
                u32 ps_result;
                int lower_bound = 6 * ns_cycle >= 25 * div;
                int upper_bound = 108 * ns_cycle <= 875 * div;
                if (!lower_bound)
                        break;
                if (!upper_bound)
                        continue;
                /*
                 * Found a matching div. Calculate fractional divider needed,
                 * rounded up.
                 */
                fracdiv = ((clk->parent->rate / 1000 * 18 / 2) *
                                ns_cycle + 1000 * div - 1) /
                                (1000 * div);
                if (fracdiv < 18 || fracdiv > 35) {
                        ret = -EINVAL;
                        goto out;
                }
                /* Calculate the actual cycle time this results in */
                ps_result = 6250 * div * fracdiv / 27;

                /* Use the fastest result that doesn't break ns_cycle */
                if (ps_result <= lowest_result) {
                        lowest_result = ps_result;
                        lowest_div = div;
                        lowest_fracdiv = fracdiv;
                }
        }

        if (div >= 256 || lowest_result == (u32) -1) {
                ret = -EINVAL;
                goto out;
        }
        pr_debug("Programming PFD=%u,DIV=%u ref_pix=%uMHz "
                        "PIXCLK=%uMHz cycle=%u.%03uns\n",
                        lowest_fracdiv, lowest_div,
                        480*18/lowest_fracdiv, 480*18/lowest_fracdiv/lowest_div,
                        lowest_result / 1000, lowest_result % 1000);

        /* Program ref_pix phase fractional divider */
        HW_CLKCTRL_FRAC_WR((HW_CLKCTRL_FRAC_RD() & ~BM_CLKCTRL_FRAC_PIXFRAC) |
                        BF_CLKCTRL_FRAC_PIXFRAC(lowest_fracdiv));
        /* Ungate PFD */
        HW_CLKCTRL_FRAC_CLR(BM_CLKCTRL_FRAC_CLKGATEPIX);

        /* Program pix divider */
        reg_val = __raw_readl(clk->scale_reg);
        reg_val &= ~(BM_CLKCTRL_PIX_DIV | BM_CLKCTRL_PIX_CLKGATE);
        reg_val |= BF_CLKCTRL_PIX_DIV(lowest_div);
        __raw_writel(reg_val, clk->scale_reg);

        /* Wait for divider update */
        if (clk->busy_reg) {
                int i;
                for (i = 10000; i; i--)
                        if (!clk_is_busy(clk))
                                break;
                if (!i) {
                        ret = -ETIMEDOUT;
                        goto out;
                }
        }

        /* Switch to ref_pix source */
        HW_CLKCTRL_CLKSEQ_CLR(BM_CLKCTRL_CLKSEQ_BYPASS_PIX);

out:
        return ret;
}


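/*
 * Reprogram the CPU clock: pick the fractional divider (18..35) and the
 * CPU integer divider that best approximate the requested rate, park the
 * CPU on the 24 MHz oscillator while the dividers are changed, wait for
 * the divider busy bit to clear, then switch back to the PLL.
 */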
static int cpu_set_rate(struct clk *clk, u32 rate)
{
        if (rate < 24000)
                return -EINVAL;
        else if (rate == 24000) {
                /* switch to the 24M source */
                clk_set_parent(clk, &osc_24M);
        } else {
                int i;
                u32 clkctrl_cpu = 1;
                u32 c = clkctrl_cpu;
                u32 clkctrl_frac = 1;
                u32 val;
                for ( ; c < 0x40; c++) {
                        u32 f = (pll_clk.rate*18/c + rate/2) / rate;
                        int s1, s2;

                        if (f < 18 || f > 35)
                                continue;
                        s1 = pll_clk.rate*18/clkctrl_frac/clkctrl_cpu - rate;
                        s2 = pll_clk.rate*18/c/f - rate;
                        pr_debug("%s: s1 %d, s2 %d\n", __func__, s1, s2);
                        if (abs(s1) > abs(s2)) {
                                clkctrl_cpu = c;
                                clkctrl_frac = f;
                        }
                        if (s2 == 0)
                                break;
                }
                pr_debug("%s: clkctrl_cpu %d, clkctrl_frac %d\n", __func__,
                                clkctrl_cpu, clkctrl_frac);
                if (c == 0x40) {
                        int d = pll_clk.rate*18/clkctrl_frac/clkctrl_cpu -
                                rate;
                        if (abs(d) > 100 ||
                            clkctrl_frac < 18 || clkctrl_frac > 35)
                                return -EINVAL;
                }

                /* 4.6.2 */
                val = __raw_readl(clk->scale_reg);
                val &= ~(0x3f << clk->scale_shift);
                val |= clkctrl_frac;
                clk_set_parent(clk, &osc_24M);
                udelay(10);
                __raw_writel(val, clk->scale_reg);
                /* ungate */
                __raw_writel(1<<7, clk->scale_reg + 8);
                /* write clkctrl_cpu */
                clk->saved_div = clkctrl_cpu;
                HW_CLKCTRL_CPU_WR((HW_CLKCTRL_CPU_RD() & ~0x3f) | clkctrl_cpu);
                for (i = 10000; i; i--)
                        if (!clk_is_busy(clk))
                                break;
                if (!i) {
                        printk(KERN_ERR "couldn't set up CPU divisor\n");
                        return -ETIMEDOUT;
                }
                clk_set_parent(clk, &pll_clk);
                clk->saved_div = 0;
                udelay(10);
        }
        return 0;
}

static long cpu_get_rate(struct clk *clk)
{
        long rate = clk->parent->rate * 18;

        rate /= (__raw_readl(clk->scale_reg) >> clk->scale_shift) & 0x3f;
        rate /= HW_CLKCTRL_CPU_RD() & 0x3f;
        rate = ((rate + 9) / 10) * 10;
        clk->rate = rate;

        return rate;
}

static long cpu_round_rate(struct clk *clk, u32 rate)
{
        unsigned long r = 0;

        if (rate <= 24000)
                r = 24000;
        else {
                u32 clkctrl_cpu = 1;
                u32 clkctrl_frac;
                do {
                        clkctrl_frac =
                                (pll_clk.rate*18 / clkctrl_cpu + rate/2) / rate;
                        if (clkctrl_frac > 35)
                                continue;
                        if (pll_clk.rate*18 / clkctrl_frac / clkctrl_cpu/10 ==
                            rate / 10)
                                break;
                } while (pll_clk.rate / 2 >= clkctrl_cpu++ * rate);
                if (pll_clk.rate / 2 < (clkctrl_cpu - 1) * rate)
                        clkctrl_cpu--;
                pr_debug("%s: clkctrl_cpu %d, clkctrl_frac %d\n", __func__,
                                clkctrl_cpu, clkctrl_frac);
                if (clkctrl_frac < 18)
                        clkctrl_frac = 18;
                if (clkctrl_frac > 35)
                        clkctrl_frac = 35;

                r = pll_clk.rate * 18;
                r /= clkctrl_frac;
                r /= clkctrl_cpu;
                r = 10 * ((r + 9) / 10);
        }
        return r;
}

static long emi_get_rate(struct clk *clk)
{
        long rate = clk->parent->rate * 18;

        rate /= (__raw_readl(clk->scale_reg) >> clk->scale_shift) & 0x3f;
        rate /= HW_CLKCTRL_EMI_RD() & 0x3f;
        clk->rate = rate;

        return rate;
}

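/*
 * Select a clock's source via HW_CLKCTRL_CLKSEQ: the bypass bit is
 * written at offset +4 (the register's SET alias) to bypass to the
 * 24 MHz oscillator, or at offset +8 (the CLR alias) to use the
 * PLL-derived source. For the CPU clock the HBUS and CPU dividers are
 * adjusted around the switch (on STMP378x) and the old CPU divider is
 * saved and restored.
 */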
static int clkseq_set_parent(struct clk *clk, struct clk *parent)
{
        int ret = -EINVAL;
        int shift = 8;

        /* bypass? */
        if (parent == &osc_24M)
                shift = 4;

        if (clk->bypass_reg) {
                u32 hbus_mask = BM_CLKCTRL_HBUS_DIV_FRAC_EN |
                                BM_CLKCTRL_HBUS_DIV;

                if (clk == &cpu_clk && shift == 4) {
                        u32 hbus_val = HW_CLKCTRL_HBUS_RD();
                        u32 cpu_val = HW_CLKCTRL_CPU_RD();
                        hbus_val &= ~hbus_mask;
                        hbus_val |= 1;
                        clk->saved_div = cpu_val & BM_CLKCTRL_CPU_DIV_CPU;
                        cpu_val &= ~BM_CLKCTRL_CPU_DIV_CPU;
                        cpu_val |= 1;
                        __raw_writel(1 << clk->bypass_shift,
                                        clk->bypass_reg + shift);
                        if (machine_is_stmp378x()) {
                                HW_CLKCTRL_HBUS_WR(hbus_val);
                                HW_CLKCTRL_CPU_WR(cpu_val);
                                hclk.rate = 0;
                        }
                } else if (clk == &cpu_clk && shift == 8) {
                        u32 hbus_val = HW_CLKCTRL_HBUS_RD();
                        u32 cpu_val = HW_CLKCTRL_CPU_RD();
                        hbus_val &= ~hbus_mask;
                        hbus_val |= 2;
                        cpu_val &= ~BM_CLKCTRL_CPU_DIV_CPU;
                        if (clk->saved_div)
                                cpu_val |= clk->saved_div;
                        else
                                cpu_val |= 2;
                        if (machine_is_stmp378x()) {
                                HW_CLKCTRL_HBUS_WR(hbus_val);
                                HW_CLKCTRL_CPU_WR(cpu_val);
                                hclk.rate = 0;
                        }
                        __raw_writel(1 << clk->bypass_shift,
                                        clk->bypass_reg + shift);
                } else
                        __raw_writel(1 << clk->bypass_shift,
                                        clk->bypass_reg + shift);

                ret = 0;
        }

        return ret;
}

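/*
 * HBUS (AHB/hclk): divided down from the CPU clock either by an integer
 * divider or, when the ratio does not divide evenly and is below 32, by
 * the fractional divider (rate = parent * div / 32). The CPU clock is
 * temporarily bypassed to 24 MHz while the divider is changed.
 */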
static int hbus_set_rate(struct clk *clk, u32 rate)
{
        u8 div = 0;
        int is_frac = 0;
        u32 clkctrl_hbus;
        struct clk *parent = clk->parent;

        pr_debug("%s: rate %d, parent rate %d\n", __func__, rate,
                        parent->rate);

        if (rate > parent->rate)
                return -EINVAL;

        if (((parent->rate + rate/2) / rate) * rate != parent->rate &&
            parent->rate / rate < 32) {
                pr_debug("%s: switching to fractional mode\n", __func__);
                is_frac = 1;
        }

        if (is_frac)
                div = (32 * rate + parent->rate / 2) / parent->rate;
        else
                div = (parent->rate + rate - 1) / rate;
        pr_debug("%s: div calculated is %d\n", __func__, div);
        if (!div || div > 0x1f)
                return -EINVAL;

        clk_set_parent(&cpu_clk, &osc_24M);
        udelay(10);
        clkctrl_hbus = __raw_readl(clk->scale_reg);
        clkctrl_hbus &= ~0x3f;
        clkctrl_hbus |= div;
        clkctrl_hbus |= (is_frac << 5);

        __raw_writel(clkctrl_hbus, clk->scale_reg);
        if (clk->busy_reg) {
                int i;
                for (i = 10000; i; i--)
                        if (!clk_is_busy(clk))
                                break;
                if (!i) {
                        printk(KERN_ERR "couldn't set up HBUS divisor\n");
                        return -ETIMEDOUT;
                }
        }
        clk_set_parent(&cpu_clk, &pll_clk);
        __raw_writel(clkctrl_hbus, clk->scale_reg);
        udelay(10);
        return 0;
}

static long hbus_get_rate(struct clk *clk)
{
        long rate = clk->parent->rate;

        if (__raw_readl(clk->scale_reg) & 0x20) {
                rate *= __raw_readl(clk->scale_reg) & 0x1f;
                rate /= 32;
        } else
                rate /= __raw_readl(clk->scale_reg) & 0x1f;
        clk->rate = rate;

        return rate;
}

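/*
 * XBUS (xclk): the 24 MHz oscillator divided by a 10-bit integer
 * divider in HW_CLKCTRL_XBUS.
 */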
static int xbus_set_rate(struct clk *clk, u32 rate)
{
        u16 div = 0;
        u32 clkctrl_xbus;

        pr_debug("%s: rate %d, parent rate %d\n", __func__, rate,
                        clk->parent->rate);

        div = (clk->parent->rate + rate - 1) / rate;
        pr_debug("%s: div calculated is %d\n", __func__, div);
        if (!div || div > 0x3ff)
                return -EINVAL;

        clkctrl_xbus = __raw_readl(clk->scale_reg);
        clkctrl_xbus &= ~0x3ff;
        clkctrl_xbus |= div;
        __raw_writel(clkctrl_xbus, clk->scale_reg);
        if (clk->busy_reg) {
                int i;
                for (i = 10000; i; i--)
                        if (!clk_is_busy(clk))
                                break;
                if (!i) {
                        printk(KERN_ERR "couldn't set up xbus divisor\n");
                        return -ETIMEDOUT;
                }
        }
        return 0;
}

static long xbus_get_rate(struct clk *clk)
{
        long rate = clk->parent->rate;

        rate /= __raw_readl(clk->scale_reg) & 0x3ff;
        clk->rate = rate;

        return rate;
}


/* Clock ops */

static struct clk_ops std_ops = {
        .enable = std_clk_enable,
        .disable = std_clk_disable,
        .get_rate = per_get_rate,
        .set_rate = per_set_rate,
        .set_parent = clkseq_set_parent,
};

static struct clk_ops min_ops = {
        .enable = std_clk_enable,
        .disable = std_clk_disable,
};

static struct clk_ops cpu_ops = {
        .enable = std_clk_enable,
        .disable = std_clk_disable,
        .get_rate = cpu_get_rate,
        .set_rate = cpu_set_rate,
        .round_rate = cpu_round_rate,
        .set_parent = clkseq_set_parent,
};

static struct clk_ops io_ops = {
        .enable = std_clk_enable,
        .disable = std_clk_disable,
        .get_rate = io_get_rate,
        .set_rate = io_set_rate,
};

static struct clk_ops hbus_ops = {
        .get_rate = hbus_get_rate,
        .set_rate = hbus_set_rate,
};

static struct clk_ops xbus_ops = {
        .get_rate = xbus_get_rate,
        .set_rate = xbus_set_rate,
};

static struct clk_ops lcdif_ops = {
        .enable = std_clk_enable,
        .disable = std_clk_disable,
        .get_rate = lcdif_get_rate,
        .set_rate = lcdif_set_rate,
        .set_parent = clkseq_set_parent,
};

static struct clk_ops emi_ops = {
        .get_rate = emi_get_rate,
};

/* List of on-chip clocks */
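/* Note: .rate values, and the rates handled above, are in kHz
 * (24000 == 24 MHz crystal, 480000 == 480 MHz PLL). */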

static struct clk osc_24M = {
        .flags = FIXED_RATE | ENABLED,
        .rate = 24000,
};

static struct clk pll_clk = {
        .parent = &osc_24M,
        .enable_reg = HW_CLKCTRL_PLLCTRL0_ADDR,
        .enable_shift = 16,
        .enable_wait = 10,
        .flags = FIXED_RATE | ENABLED,
        .rate = 480000,
        .ops = &min_ops,
};

static struct clk cpu_clk = {
        .parent = &pll_clk,
        .scale_reg = HW_CLKCTRL_FRAC_ADDR,
        .scale_shift = 0,
        .bypass_reg = HW_CLKCTRL_CLKSEQ_ADDR,
        .bypass_shift = 7,
        .busy_reg = HW_CLKCTRL_CPU_ADDR,
        .busy_bit = 28,
        .flags = RATE_PROPAGATES | ENABLED,
        .ops = &cpu_ops,
};

static struct clk io_clk = {
        .parent = &pll_clk,
        .enable_reg = HW_CLKCTRL_FRAC_ADDR,
        .enable_shift = 31,
        .enable_negate = 1,
        .scale_reg = HW_CLKCTRL_FRAC_ADDR,
        .scale_shift = 24,
        .flags = RATE_PROPAGATES | ENABLED,
        .ops = &io_ops,
};

static struct clk hclk = {
        .parent = &cpu_clk,
        .scale_reg = HW_CLKCTRL_HBUS_ADDR,
        .bypass_reg = HW_CLKCTRL_CLKSEQ_ADDR,
        .bypass_shift = 7,
        .busy_reg = HW_CLKCTRL_HBUS_ADDR,
        .busy_bit = 29,
        .flags = RATE_PROPAGATES | ENABLED,
        .ops = &hbus_ops,
};

static struct clk xclk = {
        .parent = &osc_24M,
        .scale_reg = HW_CLKCTRL_XBUS_ADDR,
        .busy_reg = HW_CLKCTRL_XBUS_ADDR,
        .busy_bit = 31,
        .flags = RATE_PROPAGATES | ENABLED,
        .ops = &xbus_ops,
};

static struct clk uart_clk = {
        .parent = &xclk,
        .enable_reg = HW_CLKCTRL_XTAL_ADDR,
        .enable_shift = 31,
        .enable_negate = 1,
        .flags = ENABLED,
        .ops = &min_ops,
};

static struct clk audio_clk = {
        .parent = &xclk,
        .enable_reg = HW_CLKCTRL_XTAL_ADDR,
        .enable_shift = 30,
        .enable_negate = 1,
        .ops = &min_ops,
};

static struct clk pwm_clk = {
        .parent = &xclk,
        .enable_reg = HW_CLKCTRL_XTAL_ADDR,
        .enable_shift = 29,
        .enable_negate = 1,
        .ops = &min_ops,
};

static struct clk dri_clk = {
        .parent = &xclk,
        .enable_reg = HW_CLKCTRL_XTAL_ADDR,
        .enable_shift = 28,
        .enable_negate = 1,
        .ops = &min_ops,
};

static struct clk digctl_clk = {
        .parent = &xclk,
        .enable_reg = HW_CLKCTRL_XTAL_ADDR,
        .enable_shift = 27,
        .enable_negate = 1,
        .ops = &min_ops,
};

static struct clk timer_clk = {
        .parent = &xclk,
        .enable_reg = HW_CLKCTRL_XTAL_ADDR,
        .enable_shift = 26,
        .enable_negate = 1,
        .flags = ENABLED,
        .ops = &min_ops,
};

static struct clk lcdif_clk = {
        .parent = &pll_clk,
        .scale_reg = HW_CLKCTRL_PIX_ADDR,
        .busy_reg = HW_CLKCTRL_PIX_ADDR,
        .busy_bit = 29,
        .enable_reg = HW_CLKCTRL_PIX_ADDR,
        .enable_shift = 31,
        .enable_negate = 1,
        .bypass_reg = HW_CLKCTRL_CLKSEQ_ADDR,
        .bypass_shift = 1,
        .flags = NEEDS_SET_PARENT,
        .ops = &lcdif_ops,
};

static struct clk ssp_clk = {
        .parent = &io_clk,
        .scale_reg = HW_CLKCTRL_SSP_ADDR,
        .busy_reg = HW_CLKCTRL_SSP_ADDR,
        .busy_bit = 29,
        .enable_reg = HW_CLKCTRL_SSP_ADDR,
        .enable_shift = 31,
        .bypass_reg = HW_CLKCTRL_CLKSEQ_ADDR,
        .bypass_shift = 5,
        .enable_negate = 1,
        .flags = NEEDS_SET_PARENT,
        .ops = &std_ops,
};

static struct clk gpmi_clk = {
        .parent = &io_clk,
        .scale_reg = HW_CLKCTRL_GPMI_ADDR,
        .busy_reg = HW_CLKCTRL_GPMI_ADDR,
        .busy_bit = 29,
        .enable_reg = HW_CLKCTRL_GPMI_ADDR,
        .enable_shift = 31,
        .enable_negate = 1,
        .bypass_reg = HW_CLKCTRL_CLKSEQ_ADDR,
        .bypass_shift = 4,
        .flags = NEEDS_SET_PARENT,
        .ops = &std_ops,
};

static struct clk spdif_clk = {
        .parent = &pll_clk,
        .enable_reg = HW_CLKCTRL_SPDIF_ADDR,
        .enable_shift = 31,
        .enable_negate = 1,
        .ops = &min_ops,
};

static struct clk emi_clk = {
        .parent = &pll_clk,
        .enable_reg = HW_CLKCTRL_EMI_ADDR,
        .enable_shift = 31,
        .enable_negate = 1,
        .scale_reg = HW_CLKCTRL_FRAC_ADDR,
        .scale_shift = 8,
        .busy_reg = HW_CLKCTRL_EMI_ADDR,
        .busy_bit = 28,
        .bypass_reg = HW_CLKCTRL_CLKSEQ_ADDR,
        .bypass_shift = 6,
        .flags = ENABLED,
        .ops = &emi_ops,
};

static struct clk ir_clk = {
        .parent = &io_clk,
        .enable_reg = HW_CLKCTRL_IR_ADDR,
        .enable_shift = 31,
        .enable_negate = 1,
        .bypass_reg = HW_CLKCTRL_CLKSEQ_ADDR,
        .bypass_shift = 3,
        .ops = &min_ops,
};

static struct clk saif_clk = {
        .parent = &pll_clk,
        .scale_reg = HW_CLKCTRL_SAIF_ADDR,
        .busy_reg = HW_CLKCTRL_SAIF_ADDR,
        .busy_bit = 29,
        .enable_reg = HW_CLKCTRL_SAIF_ADDR,
        .enable_shift = 31,
        .enable_negate = 1,
        .bypass_reg = HW_CLKCTRL_CLKSEQ_ADDR,
        .bypass_shift = 0,
        .ops = &std_ops,
};

static struct clk usb_clk = {
        .parent = &pll_clk,
        .enable_reg = HW_CLKCTRL_PLLCTRL0_ADDR,
        .enable_shift = 18,
        .enable_negate = 1,
        .ops = &min_ops,
};

/* list of all the clocks */
static struct clk_lookup onchip_clks[] = {
        {
                .con_id = "osc_24M",
                .clk = &osc_24M,
        }, {
                .con_id = "pll",
                .clk = &pll_clk,
        }, {
                .con_id = "cpu",
                .clk = &cpu_clk,
        }, {
                .con_id = "hclk",
                .clk = &hclk,
        }, {
                .con_id = "xclk",
                .clk = &xclk,
        }, {
                .con_id = "io",
                .clk = &io_clk,
        }, {
                .con_id = "uart",
                .clk = &uart_clk,
        }, {
                .con_id = "audio",
                .clk = &audio_clk,
        }, {
                .con_id = "pwm",
                .clk = &pwm_clk,
        }, {
                .con_id = "dri",
                .clk = &dri_clk,
        }, {
                .con_id = "digctl",
                .clk = &digctl_clk,
        }, {
                .con_id = "timer",
                .clk = &timer_clk,
        }, {
                .con_id = "lcdif",
                .clk = &lcdif_clk,
        }, {
                .con_id = "ssp",
                .clk = &ssp_clk,
        }, {
                .con_id = "gpmi",
                .clk = &gpmi_clk,
        }, {
                .con_id = "spdif",
                .clk = &spdif_clk,
        }, {
                .con_id = "emi",
                .clk = &emi_clk,
        }, {
                .con_id = "ir",
                .clk = &ir_clk,
        }, {
                .con_id = "saif",
                .clk = &saif_clk,
        }, {
                .con_id = "usb",
                .clk = &usb_clk,
        },
};

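/*
 * Refresh the cached rate of every clock whose parent is 'clk',
 * recursing into children that are themselves flagged RATE_PROPAGATES.
 */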
static int propagate_rate(struct clk *clk)
{
        struct clk_lookup *cl;

        for (cl = onchip_clks; cl < onchip_clks + ARRAY_SIZE(onchip_clks);
             cl++) {
                if (unlikely(!clk_good(cl->clk)))
                        continue;
                if (cl->clk->parent == clk && cl->clk->ops->get_rate) {
                        cl->clk->ops->get_rate(cl->clk);
                        if (cl->clk->flags & RATE_PROPAGATES)
                                propagate_rate(cl->clk);
                }
        }

        return 0;
}

/* Exported API */
unsigned long clk_get_rate(struct clk *clk)
{
        if (unlikely(!clk_good(clk)))
                return 0;

        if (clk->rate != 0)
                return clk->rate;

        if (clk->ops->get_rate != NULL)
                return clk->ops->get_rate(clk);

        return clk_get_rate(clk->parent);
}
EXPORT_SYMBOL(clk_get_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
        if (unlikely(!clk_good(clk)))
                return 0;

        if (clk->ops->round_rate)
                return clk->ops->round_rate(clk, rate);

        return 0;
}
EXPORT_SYMBOL(clk_round_rate);

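/* Two rates are "close enough" if they differ by less than one part in 1000. */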
static inline int close_enough(long rate1, long rate2)
{
        return rate1 && !((rate2 - rate1) * 1000 / rate1);
}

int clk_set_rate(struct clk *clk, unsigned long rate)
{
        int ret = -EINVAL;

        if (unlikely(!clk_good(clk)))
                goto out;

        if (clk->flags & FIXED_RATE || !clk->ops->set_rate)
                goto out;

        else if (!close_enough(clk->rate, rate)) {
                ret = clk->ops->set_rate(clk, rate);
                if (ret < 0)
                        goto out;
                clk->rate = rate;
                if (clk->flags & RATE_PROPAGATES)
                        propagate_rate(clk);
        } else
                ret = 0;

out:
        return ret;
}
EXPORT_SYMBOL(clk_set_rate);

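/*
 * Enable a clock: the parent chain is enabled first, then the usage
 * count is bumped and the clock ungated under clocks_lock.
 */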
int clk_enable(struct clk *clk)
{
        unsigned long clocks_flags;

        if (unlikely(!clk_good(clk)))
                return -EINVAL;

        if (clk->parent)
                clk_enable(clk->parent);

        spin_lock_irqsave(&clocks_lock, clocks_flags);

        clk->usage++;
        if (clk->ops && clk->ops->enable)
                clk->ops->enable(clk);

        spin_unlock_irqrestore(&clocks_lock, clocks_flags);
        return 0;
}
EXPORT_SYMBOL(clk_enable);

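/*
 * Gate 'clk' and its parents if their usage count is already zero.
 * Unlike clk_disable(), this neither touches the usage count nor
 * takes clocks_lock.
 */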
static void local_clk_disable(struct clk *clk)
{
        if (unlikely(!clk_good(clk)))
                return;

        if (clk->usage == 0 && clk->ops->disable)
                clk->ops->disable(clk);

        if (clk->parent)
                local_clk_disable(clk->parent);
}

void clk_disable(struct clk *clk)
{
        unsigned long clocks_flags;

        if (unlikely(!clk_good(clk)))
                return;

        spin_lock_irqsave(&clocks_lock, clocks_flags);

        if ((--clk->usage) == 0 && clk->ops->disable)
                clk->ops->disable(clk);

        spin_unlock_irqrestore(&clocks_lock, clocks_flags);
        if (clk->parent)
                clk_disable(clk->parent);
}
EXPORT_SYMBOL(clk_disable);

/* Some additional API */
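/*
 * Re-parent a clock: on success the clock's usage count is transferred
 * from the old parent to the new one, and a parent left with a zero
 * count is gated.
 */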
int clk_set_parent(struct clk *clk, struct clk *parent)
{
        int ret = -ENODEV;
        unsigned long clocks_flags;

        if (unlikely(!clk_good(clk)))
                goto out;

        if (!clk->ops->set_parent)
                goto out;

        spin_lock_irqsave(&clocks_lock, clocks_flags);

        ret = clk->ops->set_parent(clk, parent);
        if (!ret) {
                /* disable if usage count is 0 */
                local_clk_disable(parent);

                parent->usage += clk->usage;
                clk->parent->usage -= clk->usage;

                /* disable if new usage count is 0 */
                local_clk_disable(clk->parent);

                clk->parent = parent;
        }
        spin_unlock_irqrestore(&clocks_lock, clocks_flags);

out:
        return ret;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
        if (unlikely(!clk_good(clk)))
                return NULL;
        return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);

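/*
 * Boot-time initialisation: enable clocks flagged ENABLED, gate the
 * rest, apply initial rates/parents where the flags request it, cache
 * the resulting rates and register every clock with clkdev.
 */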
static int __init clk_init(void)
{
        struct clk_lookup *cl;
        struct clk_ops *ops;

        spin_lock_init(&clocks_lock);

        for (cl = onchip_clks; cl < onchip_clks + ARRAY_SIZE(onchip_clks);
             cl++) {
                if (cl->clk->flags & ENABLED)
                        clk_enable(cl->clk);
                else
                        local_clk_disable(cl->clk);

                ops = cl->clk->ops;

                if ((cl->clk->flags & NEEDS_INITIALIZATION) &&
                    ops && ops->set_rate)
                        ops->set_rate(cl->clk, cl->clk->rate);

                if (cl->clk->flags & FIXED_RATE) {
                        if (cl->clk->flags & RATE_PROPAGATES)
                                propagate_rate(cl->clk);
                } else {
                        if (ops && ops->get_rate)
                                ops->get_rate(cl->clk);
                }

                if (cl->clk->flags & NEEDS_SET_PARENT) {
                        if (ops && ops->set_parent)
                                ops->set_parent(cl->clk, cl->clk->parent);
                }

                clkdev_add(cl);
        }
        return 0;
}

arch_initcall(clk_init);