/*
 * drivers/sh/clk.c - SuperH clock framework
 *
 * Copyright (C) 2005 - 2009 Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *	Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

void clk_rate_table_build(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  int nr_freqs,
			  struct clk_div_mult_table *src_table,
			  unsigned long *bitmap)
{
	unsigned long mult, div;
	unsigned long freq;
	int i;

	for (i = 0; i < nr_freqs; i++) {
		div = 1;
		mult = 1;

		if (src_table->divisors && i < src_table->nr_divisors)
			div = src_table->divisors[i];

		if (src_table->multipliers && i < src_table->nr_multipliers)
			mult = src_table->multipliers[i];

		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
			freq = CPUFREQ_ENTRY_INVALID;
		else
			freq = clk->parent->rate * mult / div;

		freq_table[i].index = i;
		freq_table[i].frequency = freq;
	}

	/* Termination entry */
	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;
}
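
/*
 * Illustrative sketch (not part of this driver): platform clock setup code
 * might fill a cpufreq frequency table from a divisor table roughly like
 * this. All names below (example_divisors, example_div_mult,
 * example_freq_table, example_clk) are hypothetical, and the frequency
 * table needs one extra slot for the CPUFREQ_TABLE_END terminator:
 *
 *	static unsigned int example_divisors[] = { 2, 3, 4, 6, 8, 12 };
 *
 *	static struct clk_div_mult_table example_div_mult = {
 *		.divisors	= example_divisors,
 *		.nr_divisors	= ARRAY_SIZE(example_divisors),
 *	};
 *
 *	static struct cpufreq_frequency_table example_freq_table[7];
 *
 *	clk_rate_table_build(&example_clk, example_freq_table,
 *			     ARRAY_SIZE(example_divisors),
 *			     &example_div_mult, NULL);
 */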

long clk_rate_table_round(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  unsigned long rate)
{
	unsigned long rate_error, rate_error_prev = ~0UL;
	unsigned long rate_best_fit = rate;
	unsigned long highest, lowest;
	int i;

	/* start lowest at the maximum so the minimum tracking below works */
	highest = 0;
	lowest = ~0UL;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned long freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		if (freq > highest)
			highest = freq;
		if (freq < lowest)
			lowest = freq;

		rate_error = abs(freq - rate);
		if (rate_error < rate_error_prev) {
			rate_best_fit = freq;
			rate_error_prev = rate_error;
		}

		if (rate_error == 0)
			break;
	}

	if (rate >= highest)
		rate_best_fit = highest;
	if (rate <= lowest)
		rate_best_fit = lowest;

	return rate_best_fit;
}
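
/*
 * Illustrative sketch (not part of this driver): a clock's .round_rate
 * implementation can defer to clk_rate_table_round() once its frequency
 * table has been built. The clk->freq_table member and the exact
 * .round_rate signature are assumptions based on the SH clock headers,
 * and example_round_rate is hypothetical:
 *
 *	static long example_round_rate(struct clk *clk, unsigned long rate)
 *	{
 *		return clk_rate_table_round(clk, clk->freq_table, rate);
 *	}
 */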

int clk_rate_table_find(struct clk *clk,
			struct cpufreq_frequency_table *freq_table,
			unsigned long rate)
{
	int i;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned long freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		if (freq == rate)
			return i;
	}

	return -ENOENT;
}

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent ? clk->parent->rate : 0;
}

int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/* now do the debugfs renaming to reattach the child
	   to the proper parent */

	return 0;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);

		propagate_rate(clkp);
	}
}

static void __clk_disable(struct clk *clk)
{
	if (WARN(!clk->usecount, "Trying to disable clock %s with 0 usecount\n",
		 clk->name))
		return;

	if (!(--clk->usecount)) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
		if (likely(clk->parent))
			__clk_disable(clk->parent);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
	}

	return ret;
err:
	clk->usecount--;
	return ret;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
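
/*
 * Illustrative sketch (not part of this driver): typical consumer usage of
 * the enable/disable pair. clk_get()/clk_put() live in the clock lookup
 * layer rather than in this file, and "peripheral_clk" is a hypothetical
 * clock name:
 *
 *	struct clk *clk = clk_get(dev, "peripheral_clk");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = clk_enable(clk);
 *	if (ret) {
 *		clk_put(clk);
 *		return ret;
 *	}
 *	...
 *	clk_disable(clk);
 *	clk_put(clk);
 */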

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent) and, provided each
 * clock's .recalc is set correctly, propagates the new rates down to the
 * children. Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}

int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clock_list_sem);

	INIT_LIST_HEAD(&clk->children);
	clk->usecount = 0;

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clock_list);
	if (clk->ops && clk->ops->init)
		clk->ops->init(clk);
	mutex_unlock(&clock_list_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(clk_register);
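
/*
 * Illustrative sketch (not part of this driver): platform code might
 * describe a leaf clock statically and register it during setup. The names
 * example_ops, example_parent and example_clk are hypothetical; the struct
 * members match those used elsewhere in this file:
 *
 *	static struct clk_ops example_ops = {
 *		.recalc	= followparent_recalc,
 *	};
 *
 *	static struct clk example_clk = {
 *		.name	= "example_clk",
 *		.id	= -1,
 *		.parent	= &example_parent,
 *		.ops	= &example_ops,
 *		.flags	= CLK_ENABLE_ON_INIT,
 *	};
 *
 *	clk_register(&example_clk);
 */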

void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node)
		if (clkp->flags & CLK_ENABLE_ON_INIT)
			clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
	int ret = -EOPNOTSUPP;
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);

	if (likely(clk->ops && clk->ops->set_rate)) {
		ret = clk->ops->set_rate(clk, rate, algo_id);
		if (ret != 0)
			goto out_unlock;
	} else {
		clk->rate = rate;
		ret = 0;
	}

	if (clk->ops && clk->ops->recalc)
		clk->rate = clk->ops->recalc(clk);

	propagate_rate(clk);

out_unlock:
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (!parent || !clk)
		return ret;
	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clock_lock, flags);
	if (clk->usecount == 0) {
		if (clk->ops->set_parent)
			ret = clk->ops->set_parent(clk, parent);
		else
			ret = clk_reparent(clk, parent);

		if (ret == 0) {
			pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
				 clk->name, clk->parent->name, clk->rate);
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);
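
/*
 * Illustrative sketch (not part of this driver): callers usually round a
 * requested rate before programming it. The 48 MHz target and the clk
 * handle are hypothetical:
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *	if (rounded > 0)
 *		ret = clk_set_rate(clk, rounded);
 */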

#ifdef CONFIG_PM
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp,
						rate, NO_CHANGE);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
#endif

/*
 * debugfs support to trace clock tree hierarchy and attributes
 */
static struct dentry *clk_debugfs_root;

static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d, *child, *child_tmp;
	struct clk *pa = c->parent;
	char s[255];
	char *p = s;

	p += sprintf(p, "%s", c->name);
	if (c->id >= 0)
		sprintf(p, ":%d", c->id);
	d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dentry = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	d = c->dentry;
	list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
		debugfs_remove(child);
	debugfs_remove(c->dentry);
	return err;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dentry) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dentry && c->name) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clock_list, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);