/*
 * linux/arch/arm/plat-omap/clock.c
 *
 * Copyright (C) 2004 - 2008 Nokia corporation
 * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/cpufreq.h>
#include <linux/debugfs.h>
#include <linux/io.h>

#include <plat/clock.h>

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);

static struct clk_functions *arch_clock;

/*
 * Standard clock functions defined in include/linux/clk.h
 */

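/*
 * Roughly how a driver is expected to consume these hooks (a sketch,
 * not taken from this file): look the clock up, enable it around use,
 * and drop the reference when done. The "fck" connection id below is
 * a made-up example.
 *
 *	struct clk *fck = clk_get(dev, "fck");
 *	if (!IS_ERR(fck)) {
 *		clk_enable(fck);
 *		pr_debug("fck running at %lu Hz\n", clk_get_rate(fck));
 *		clk_disable(fck);
 *		clk_put(fck);
 *	}
 */
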
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret = 0;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (arch_clock->clk_enable)
		ret = arch_clock->clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->usecount == 0) {
		pr_err("Trying to disable clock %s with 0 usecount\n",
		       clk->name);
		WARN_ON(1);
		goto out;
	}

	if (arch_clock->clk_disable)
		arch_clock->clk_disable(clk);

out:
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long flags;
	unsigned long ret = 0;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = clk->rate;
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_get_rate);

/*
 * Optional clock functions defined in include/linux/clk.h
 */

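/*
 * clk_round_rate() and clk_set_rate() are typically used together: ask
 * what the hardware can actually provide, then request that value. A
 * sketch (target_hz is a hypothetical variable):
 *
 *	long rounded = clk_round_rate(clk, target_hz);
 *	if (rounded > 0)
 *		clk_set_rate(clk, rounded);
 */
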
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	long ret = 0;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (arch_clock->clk_round_rate)
		ret = arch_clock->clk_round_rate(clk, rate);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (arch_clock->clk_set_rate)
		ret = arch_clock->clk_set_rate(clk, rate);
	if (ret == 0) {
		if (clk->recalc)
			clk->rate = clk->recalc(clk);
		propagate_rate(clk);
	}
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
		return ret;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->usecount == 0) {
		if (arch_clock->clk_set_parent)
			ret = arch_clock->clk_set_parent(clk, parent);
		if (ret == 0) {
			if (clk->recalc)
				clk->rate = clk->recalc(clk);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);

/*
 * OMAP-specific clock functions shared between omap1 and omap2
 */

int __initdata mpurate;

/*
 * By default we use the rate set by the bootloader.
 * You can override this with the mpurate= cmdline option.
 */
static int __init omap_clk_setup(char *str)
{
	get_option(&str, &mpurate);

	if (!mpurate)
		return 1;

	if (mpurate < 1000)
		mpurate *= 1000000;

	return 1;
}
__setup("mpurate=", omap_clk_setup);

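/*
 * Example: "mpurate=600" on the kernel command line is treated as
 * 600 MHz (nonzero values below 1000 are scaled by 1000000), while
 * "mpurate=600000000" is taken as a rate in Hz as-is.
 */
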
/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent->rate;
}

/*
 * Used for clocks that have the same value as the parent clock,
 * divided by some factor
 */
unsigned long omap_fixed_divisor_recalc(struct clk *clk)
{
	WARN_ON(!clk->fixed_div);

	return clk->parent->rate / clk->fixed_div;
}

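/*
 * Example: with a 96000000 Hz parent and clk->fixed_div == 2,
 * omap_fixed_divisor_recalc() returns 48000000 Hz.
 */
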
void clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/*
	 * Now do the debugfs renaming to reattach the child
	 * to the proper parent.
	 */
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}

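/*
 * Clocks form a tree: each clk hangs off its parent's ->children list
 * via its ->sibling entry, and parentless clocks sit on root_clks
 * below. This is what lets propagate_rate() walk the tree recursively
 * and lets clk_register() attach a new clock in the right place.
 */
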
static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}

/**
 * clk_preinit - initialize any fields in the struct clk before clk init
 * @clk: struct clk * to initialize
 *
 * Initialize any struct clk fields needed before normal clk initialization
 * can run. No return value.
 */
void clk_preinit(struct clk *clk)
{
	INIT_LIST_HEAD(&clk->children);
}

int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clocks_mutex);
	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clocks);
	if (clk->init)
		clk->init(clk);
	mutex_unlock(&clocks_mutex);

	return 0;
}
EXPORT_SYMBOL(clk_register);

void clk_unregister(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;

	mutex_lock(&clocks_mutex);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clocks, node) {
		if (clkp->flags & ENABLE_ON_INIT)
			clk_enable(clkp);
	}
}

/**
 * omap_clk_get_by_name - locate OMAP struct clk by its name
 * @name: name of the struct clk to locate
 *
 * Locate an OMAP struct clk by its name.  Assumes that struct clk
 * names are unique.  Returns NULL if not found or a pointer to the
 * struct clk if found.
 */
struct clk *omap_clk_get_by_name(const char *name)
{
	struct clk *c;
	struct clk *ret = NULL;

	mutex_lock(&clocks_mutex);

	list_for_each_entry(c, &clocks, node) {
		if (!strcmp(c->name, name)) {
			ret = c;
			break;
		}
	}

	mutex_unlock(&clocks_mutex);

	return ret;
}

/*
 * Low level helpers
 */
static int clkll_enable_null(struct clk *clk)
{
	return 0;
}

static void clkll_disable_null(struct clk *clk)
{
}

const struct clkops clkops_null = {
	.enable = clkll_enable_null,
	.disable = clkll_disable_null,
};

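/*
 * clkops_null gives a clock enable/disable hooks that do nothing, for
 * clocks with no gate control of their own; clk_disable_unused() below
 * also skips such clocks. The dummy clock that follows uses it.
 */
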
/*
 * Dummy clock
 *
 * Used for clock aliases that are needed on some OMAPs, but not others
 */
struct clk dummy_ck = {
	.name = "dummy",
	.ops = &clkops_null,
};

#ifdef CONFIG_CPU_FREQ
void clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
{
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (arch_clock->clk_init_cpufreq_table)
		arch_clock->clk_init_cpufreq_table(table);
	spin_unlock_irqrestore(&clockfw_lock, flags);
}

void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table)
{
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (arch_clock->clk_exit_cpufreq_table)
		arch_clock->clk_exit_cpufreq_table(table);
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
#endif

#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
	struct clk *ck;
	unsigned long flags;

	pr_info("clock: disabling unused clocks to save power\n");
	list_for_each_entry(ck, &clocks, node) {
		if (ck->ops == &clkops_null)
			continue;

		if (ck->usecount > 0 || !ck->enable_reg)
			continue;

		spin_lock_irqsave(&clockfw_lock, flags);
		if (arch_clock->clk_disable_unused)
			arch_clock->clk_disable_unused(ck);
		spin_unlock_irqrestore(&clockfw_lock, flags);
	}

	return 0;
}
late_initcall(clk_disable_unused);
#endif

int __init clk_init(struct clk_functions *custom_clocks)
{
	if (!custom_clocks) {
		pr_err("No custom clock functions registered\n");
		BUG();
	}

	arch_clock = custom_clocks;

	return 0;
}

#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
/*
 * debugfs support to trace clock tree hierarchy and attributes
 */
static struct dentry *clk_debugfs_root;

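/*
 * With this enabled, each registered clock shows up under the debugfs
 * "clock" directory, nested to mirror its parent chain, e.g. (path is
 * illustrative, assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	/sys/kernel/debug/clock/<parent>/<clk name>/usecount
 *	/sys/kernel/debug/clock/<parent>/<clk name>/rate
 *	/sys/kernel/debug/clock/<parent>/<clk name>/flags
 */
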
static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d, *child, *child_tmp;
	struct clk *pa = c->parent;
	char s[255];
	char *p = s;

	p += sprintf(p, "%s", c->name);
	d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dent = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	d = c->dent;
	list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
		debugfs_remove(child);
	debugfs_remove(c->dent);
	return err;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dent) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dent) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clocks, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);

#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */