Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* |
Tony Lindgren | b915855 | 2005-07-10 19:58:14 +0100 | [diff] [blame] | 2 | * linux/arch/arm/plat-omap/clock.c |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3 | * |
Hiroshi DOYU | 137b3ee | 2008-07-03 12:24:41 +0300 | [diff] [blame] | 4 | * Copyright (C) 2004 - 2008 Nokia corporation |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5 | * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> |
| 6 | * |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 7 | * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com> |
| 8 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9 | * This program is free software; you can redistribute it and/or modify |
| 10 | * it under the terms of the GNU General Public License version 2 as |
| 11 | * published by the Free Software Foundation. |
| 12 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13 | #include <linux/kernel.h> |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 14 | #include <linux/init.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 15 | #include <linux/list.h> |
| 16 | #include <linux/errno.h> |
| 17 | #include <linux/err.h> |
Tim Schmielau | 4e57b68 | 2005-10-30 15:03:48 -0800 | [diff] [blame] | 18 | #include <linux/string.h> |
Russell King | f8ce254 | 2006-01-07 16:15:52 +0000 | [diff] [blame] | 19 | #include <linux/clk.h> |
Arjan van de Ven | 0043170 | 2006-01-12 18:42:23 +0000 | [diff] [blame] | 20 | #include <linux/mutex.h> |
Russell King | b851cb2 | 2008-05-22 16:38:50 +0100 | [diff] [blame] | 21 | #include <linux/cpufreq.h> |
Hiroshi DOYU | 137b3ee | 2008-07-03 12:24:41 +0300 | [diff] [blame] | 22 | #include <linux/debugfs.h> |
Russell King | fced80c | 2008-09-06 12:10:45 +0100 | [diff] [blame] | 23 | #include <linux/io.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 24 | |
Tony Lindgren | ce491cf | 2009-10-20 09:40:47 -0700 | [diff] [blame] | 25 | #include <plat/clock.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 26 | |
/* Global list of every registered struct clk; protected by clocks_mutex */
static LIST_HEAD(clocks);
/* Serializes clock (un)registration and name lookup (sleepable context) */
static DEFINE_MUTEX(clocks_mutex);
/* Protects clock hardware state; always taken with IRQs disabled */
static DEFINE_SPINLOCK(clockfw_lock);

/* SoC-specific clock operations, installed once by clk_init() */
static struct clk_functions *arch_clock;
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 32 | |
Paul Walmsley | 3587aeb | 2010-05-18 18:40:26 -0600 | [diff] [blame] | 33 | /* |
Tony Lindgren | f07adc5 | 2006-01-17 15:27:09 -0800 | [diff] [blame] | 34 | * Standard clock functions defined in include/linux/clk.h |
Paul Walmsley | 3587aeb | 2010-05-18 18:40:26 -0600 | [diff] [blame] | 35 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 36 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 37 | int clk_enable(struct clk *clk) |
| 38 | { |
| 39 | unsigned long flags; |
Paul Walmsley | e07f469 | 2011-02-16 15:38:38 -0700 | [diff] [blame] | 40 | int ret; |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 41 | |
Tony Lindgren | b824efa | 2006-04-02 17:46:20 +0100 | [diff] [blame] | 42 | if (clk == NULL || IS_ERR(clk)) |
| 43 | return -EINVAL; |
| 44 | |
Paul Walmsley | e07f469 | 2011-02-16 15:38:38 -0700 | [diff] [blame] | 45 | if (!arch_clock || !arch_clock->clk_enable) |
| 46 | return -EINVAL; |
| 47 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 48 | spin_lock_irqsave(&clockfw_lock, flags); |
Paul Walmsley | e07f469 | 2011-02-16 15:38:38 -0700 | [diff] [blame] | 49 | ret = arch_clock->clk_enable(clk); |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 50 | spin_unlock_irqrestore(&clockfw_lock, flags); |
| 51 | |
| 52 | return ret; |
| 53 | } |
| 54 | EXPORT_SYMBOL(clk_enable); |
| 55 | |
| 56 | void clk_disable(struct clk *clk) |
| 57 | { |
| 58 | unsigned long flags; |
| 59 | |
Tony Lindgren | b824efa | 2006-04-02 17:46:20 +0100 | [diff] [blame] | 60 | if (clk == NULL || IS_ERR(clk)) |
| 61 | return; |
| 62 | |
Paul Walmsley | e07f469 | 2011-02-16 15:38:38 -0700 | [diff] [blame] | 63 | if (!arch_clock || !arch_clock->clk_disable) |
| 64 | return; |
| 65 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 66 | spin_lock_irqsave(&clockfw_lock, flags); |
Tony Lindgren | 7cf9577 | 2007-08-07 05:20:00 -0700 | [diff] [blame] | 67 | if (clk->usecount == 0) { |
Paul Walmsley | 6041c27 | 2010-10-08 11:40:20 -0600 | [diff] [blame] | 68 | pr_err("Trying disable clock %s with 0 usecount\n", |
Tony Lindgren | 7cf9577 | 2007-08-07 05:20:00 -0700 | [diff] [blame] | 69 | clk->name); |
| 70 | WARN_ON(1); |
| 71 | goto out; |
| 72 | } |
| 73 | |
Paul Walmsley | e07f469 | 2011-02-16 15:38:38 -0700 | [diff] [blame] | 74 | arch_clock->clk_disable(clk); |
Tony Lindgren | 7cf9577 | 2007-08-07 05:20:00 -0700 | [diff] [blame] | 75 | |
| 76 | out: |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 77 | spin_unlock_irqrestore(&clockfw_lock, flags); |
| 78 | } |
| 79 | EXPORT_SYMBOL(clk_disable); |
| 80 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 81 | unsigned long clk_get_rate(struct clk *clk) |
| 82 | { |
| 83 | unsigned long flags; |
Paul Walmsley | e07f469 | 2011-02-16 15:38:38 -0700 | [diff] [blame] | 84 | unsigned long ret; |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 85 | |
Tony Lindgren | b824efa | 2006-04-02 17:46:20 +0100 | [diff] [blame] | 86 | if (clk == NULL || IS_ERR(clk)) |
| 87 | return 0; |
| 88 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 89 | spin_lock_irqsave(&clockfw_lock, flags); |
| 90 | ret = clk->rate; |
| 91 | spin_unlock_irqrestore(&clockfw_lock, flags); |
| 92 | |
| 93 | return ret; |
| 94 | } |
| 95 | EXPORT_SYMBOL(clk_get_rate); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 96 | |
Paul Walmsley | 3587aeb | 2010-05-18 18:40:26 -0600 | [diff] [blame] | 97 | /* |
Tony Lindgren | f07adc5 | 2006-01-17 15:27:09 -0800 | [diff] [blame] | 98 | * Optional clock functions defined in include/linux/clk.h |
Paul Walmsley | 3587aeb | 2010-05-18 18:40:26 -0600 | [diff] [blame] | 99 | */ |
Tony Lindgren | bb13b5f | 2005-07-10 19:58:18 +0100 | [diff] [blame] | 100 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 101 | long clk_round_rate(struct clk *clk, unsigned long rate) |
| 102 | { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 103 | unsigned long flags; |
Paul Walmsley | e07f469 | 2011-02-16 15:38:38 -0700 | [diff] [blame] | 104 | long ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 105 | |
Tony Lindgren | b824efa | 2006-04-02 17:46:20 +0100 | [diff] [blame] | 106 | if (clk == NULL || IS_ERR(clk)) |
Paul Walmsley | e07f469 | 2011-02-16 15:38:38 -0700 | [diff] [blame] | 107 | return 0; |
| 108 | |
| 109 | if (!arch_clock || !arch_clock->clk_round_rate) |
| 110 | return 0; |
Tony Lindgren | b824efa | 2006-04-02 17:46:20 +0100 | [diff] [blame] | 111 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 112 | spin_lock_irqsave(&clockfw_lock, flags); |
Paul Walmsley | e07f469 | 2011-02-16 15:38:38 -0700 | [diff] [blame] | 113 | ret = arch_clock->clk_round_rate(clk, rate); |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 114 | spin_unlock_irqrestore(&clockfw_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 115 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 116 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 117 | } |
| 118 | EXPORT_SYMBOL(clk_round_rate); |
| 119 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 120 | int clk_set_rate(struct clk *clk, unsigned long rate) |
| 121 | { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 122 | unsigned long flags; |
Tony Lindgren | b824efa | 2006-04-02 17:46:20 +0100 | [diff] [blame] | 123 | int ret = -EINVAL; |
| 124 | |
| 125 | if (clk == NULL || IS_ERR(clk)) |
| 126 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 127 | |
Paul Walmsley | e07f469 | 2011-02-16 15:38:38 -0700 | [diff] [blame] | 128 | if (!arch_clock || !arch_clock->clk_set_rate) |
| 129 | return ret; |
| 130 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 131 | spin_lock_irqsave(&clockfw_lock, flags); |
Paul Walmsley | e07f469 | 2011-02-16 15:38:38 -0700 | [diff] [blame] | 132 | ret = arch_clock->clk_set_rate(clk, rate); |
| 133 | if (ret == 0) |
Russell King | 3f0a820 | 2009-01-31 10:05:51 +0000 | [diff] [blame] | 134 | propagate_rate(clk); |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 135 | spin_unlock_irqrestore(&clockfw_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 136 | |
| 137 | return ret; |
| 138 | } |
| 139 | EXPORT_SYMBOL(clk_set_rate); |
| 140 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 141 | int clk_set_parent(struct clk *clk, struct clk *parent) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 142 | { |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 143 | unsigned long flags; |
Tony Lindgren | b824efa | 2006-04-02 17:46:20 +0100 | [diff] [blame] | 144 | int ret = -EINVAL; |
| 145 | |
| 146 | if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent)) |
| 147 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 148 | |
Paul Walmsley | e07f469 | 2011-02-16 15:38:38 -0700 | [diff] [blame] | 149 | if (!arch_clock || !arch_clock->clk_set_parent) |
| 150 | return ret; |
| 151 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 152 | spin_lock_irqsave(&clockfw_lock, flags); |
Russell King | 4da3782 | 2009-02-24 12:46:31 +0000 | [diff] [blame] | 153 | if (clk->usecount == 0) { |
Paul Walmsley | e07f469 | 2011-02-16 15:38:38 -0700 | [diff] [blame] | 154 | ret = arch_clock->clk_set_parent(clk, parent); |
| 155 | if (ret == 0) |
Russell King | 4da3782 | 2009-02-24 12:46:31 +0000 | [diff] [blame] | 156 | propagate_rate(clk); |
Russell King | 4da3782 | 2009-02-24 12:46:31 +0000 | [diff] [blame] | 157 | } else |
| 158 | ret = -EBUSY; |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 159 | spin_unlock_irqrestore(&clockfw_lock, flags); |
| 160 | |
| 161 | return ret; |
| 162 | } |
| 163 | EXPORT_SYMBOL(clk_set_parent); |
| 164 | |
| 165 | struct clk *clk_get_parent(struct clk *clk) |
| 166 | { |
Russell King | 2e777bf | 2009-02-08 17:49:22 +0000 | [diff] [blame] | 167 | return clk->parent; |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 168 | } |
| 169 | EXPORT_SYMBOL(clk_get_parent); |
| 170 | |
Paul Walmsley | 3587aeb | 2010-05-18 18:40:26 -0600 | [diff] [blame] | 171 | /* |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 172 | * OMAP specific clock functions shared between omap1 and omap2 |
Paul Walmsley | 3587aeb | 2010-05-18 18:40:26 -0600 | [diff] [blame] | 173 | */ |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 174 | |
Paul Walmsley | d373019 | 2010-01-26 20:13:11 -0700 | [diff] [blame] | 175 | int __initdata mpurate; |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 176 | |
| 177 | /* |
| 178 | * By default we use the rate set by the bootloader. |
| 179 | * You can override this with mpurate= cmdline option. |
| 180 | */ |
| 181 | static int __init omap_clk_setup(char *str) |
| 182 | { |
| 183 | get_option(&str, &mpurate); |
| 184 | |
| 185 | if (!mpurate) |
| 186 | return 1; |
| 187 | |
| 188 | if (mpurate < 1000) |
| 189 | mpurate *= 1000000; |
| 190 | |
| 191 | return 1; |
| 192 | } |
| 193 | __setup("mpurate=", omap_clk_setup); |
| 194 | |
| 195 | /* Used for clocks that always have same value as the parent clock */ |
Russell King | 8b9dbc1 | 2009-02-12 10:12:59 +0000 | [diff] [blame] | 196 | unsigned long followparent_recalc(struct clk *clk) |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 197 | { |
Russell King | 8b9dbc1 | 2009-02-12 10:12:59 +0000 | [diff] [blame] | 198 | return clk->parent->rate; |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 199 | } |
| 200 | |
Paul Walmsley | e9b98f6 | 2010-01-26 20:12:57 -0700 | [diff] [blame] | 201 | /* |
| 202 | * Used for clocks that have the same value as the parent clock, |
| 203 | * divided by some factor |
| 204 | */ |
| 205 | unsigned long omap_fixed_divisor_recalc(struct clk *clk) |
| 206 | { |
| 207 | WARN_ON(!clk->fixed_div); |
| 208 | |
| 209 | return clk->parent->rate / clk->fixed_div; |
| 210 | } |
| 211 | |
Russell King | 3f0a820 | 2009-01-31 10:05:51 +0000 | [diff] [blame] | 212 | void clk_reparent(struct clk *child, struct clk *parent) |
| 213 | { |
| 214 | list_del_init(&child->sibling); |
| 215 | if (parent) |
| 216 | list_add(&child->sibling, &parent->children); |
| 217 | child->parent = parent; |
| 218 | |
| 219 | /* now do the debugfs renaming to reattach the child |
| 220 | to the proper parent */ |
| 221 | } |
| 222 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 223 | /* Propagate rate to children */ |
Paul Walmsley | 3587aeb | 2010-05-18 18:40:26 -0600 | [diff] [blame] | 224 | void propagate_rate(struct clk *tclk) |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 225 | { |
| 226 | struct clk *clkp; |
| 227 | |
Russell King | 3f0a820 | 2009-01-31 10:05:51 +0000 | [diff] [blame] | 228 | list_for_each_entry(clkp, &tclk->children, sibling) { |
Russell King | 9a5feda | 2008-11-13 13:44:15 +0000 | [diff] [blame] | 229 | if (clkp->recalc) |
Russell King | 8b9dbc1 | 2009-02-12 10:12:59 +0000 | [diff] [blame] | 230 | clkp->rate = clkp->recalc(clkp); |
Russell King | 3f0a820 | 2009-01-31 10:05:51 +0000 | [diff] [blame] | 231 | propagate_rate(clkp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 232 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 233 | } |
| 234 | |
Russell King | 3f0a820 | 2009-01-31 10:05:51 +0000 | [diff] [blame] | 235 | static LIST_HEAD(root_clks); |
| 236 | |
Paul Walmsley | 6b8858a | 2008-03-18 10:35:15 +0200 | [diff] [blame] | 237 | /** |
| 238 | * recalculate_root_clocks - recalculate and propagate all root clocks |
| 239 | * |
| 240 | * Recalculates all root clocks (clocks with no parent), which if the |
| 241 | * clock's .recalc is set correctly, should also propagate their rates. |
| 242 | * Called at init. |
| 243 | */ |
| 244 | void recalculate_root_clocks(void) |
| 245 | { |
| 246 | struct clk *clkp; |
| 247 | |
Russell King | 3f0a820 | 2009-01-31 10:05:51 +0000 | [diff] [blame] | 248 | list_for_each_entry(clkp, &root_clks, sibling) { |
| 249 | if (clkp->recalc) |
Russell King | 8b9dbc1 | 2009-02-12 10:12:59 +0000 | [diff] [blame] | 250 | clkp->rate = clkp->recalc(clkp); |
Russell King | 3f0a820 | 2009-01-31 10:05:51 +0000 | [diff] [blame] | 251 | propagate_rate(clkp); |
Paul Walmsley | 6b8858a | 2008-03-18 10:35:15 +0200 | [diff] [blame] | 252 | } |
| 253 | } |
| 254 | |
Paul Walmsley | c808811 | 2009-04-22 19:48:53 -0600 | [diff] [blame] | 255 | /** |
Paul Walmsley | 7971687 | 2009-05-12 17:50:30 -0600 | [diff] [blame] | 256 | * clk_preinit - initialize any fields in the struct clk before clk init |
Paul Walmsley | c808811 | 2009-04-22 19:48:53 -0600 | [diff] [blame] | 257 | * @clk: struct clk * to initialize |
| 258 | * |
| 259 | * Initialize any struct clk fields needed before normal clk initialization |
| 260 | * can run. No return value. |
| 261 | */ |
Paul Walmsley | 7971687 | 2009-05-12 17:50:30 -0600 | [diff] [blame] | 262 | void clk_preinit(struct clk *clk) |
Russell King | 3f0a820 | 2009-01-31 10:05:51 +0000 | [diff] [blame] | 263 | { |
| 264 | INIT_LIST_HEAD(&clk->children); |
| 265 | } |
| 266 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 267 | int clk_register(struct clk *clk) |
| 268 | { |
Tony Lindgren | b824efa | 2006-04-02 17:46:20 +0100 | [diff] [blame] | 269 | if (clk == NULL || IS_ERR(clk)) |
| 270 | return -EINVAL; |
| 271 | |
Russell King | dbb674d | 2009-01-22 16:08:04 +0000 | [diff] [blame] | 272 | /* |
| 273 | * trap out already registered clocks |
| 274 | */ |
| 275 | if (clk->node.next || clk->node.prev) |
| 276 | return 0; |
| 277 | |
Arjan van de Ven | 0043170 | 2006-01-12 18:42:23 +0000 | [diff] [blame] | 278 | mutex_lock(&clocks_mutex); |
Russell King | 3f0a820 | 2009-01-31 10:05:51 +0000 | [diff] [blame] | 279 | if (clk->parent) |
| 280 | list_add(&clk->sibling, &clk->parent->children); |
| 281 | else |
| 282 | list_add(&clk->sibling, &root_clks); |
| 283 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 284 | list_add(&clk->node, &clocks); |
| 285 | if (clk->init) |
| 286 | clk->init(clk); |
Arjan van de Ven | 0043170 | 2006-01-12 18:42:23 +0000 | [diff] [blame] | 287 | mutex_unlock(&clocks_mutex); |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 288 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 289 | return 0; |
| 290 | } |
| 291 | EXPORT_SYMBOL(clk_register); |
| 292 | |
| 293 | void clk_unregister(struct clk *clk) |
| 294 | { |
Tony Lindgren | b824efa | 2006-04-02 17:46:20 +0100 | [diff] [blame] | 295 | if (clk == NULL || IS_ERR(clk)) |
| 296 | return; |
| 297 | |
Arjan van de Ven | 0043170 | 2006-01-12 18:42:23 +0000 | [diff] [blame] | 298 | mutex_lock(&clocks_mutex); |
Russell King | 3f0a820 | 2009-01-31 10:05:51 +0000 | [diff] [blame] | 299 | list_del(&clk->sibling); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 300 | list_del(&clk->node); |
Arjan van de Ven | 0043170 | 2006-01-12 18:42:23 +0000 | [diff] [blame] | 301 | mutex_unlock(&clocks_mutex); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 302 | } |
| 303 | EXPORT_SYMBOL(clk_unregister); |
| 304 | |
Paul Walmsley | 6b8858a | 2008-03-18 10:35:15 +0200 | [diff] [blame] | 305 | void clk_enable_init_clocks(void) |
| 306 | { |
| 307 | struct clk *clkp; |
| 308 | |
| 309 | list_for_each_entry(clkp, &clocks, node) { |
| 310 | if (clkp->flags & ENABLE_ON_INIT) |
| 311 | clk_enable(clkp); |
| 312 | } |
| 313 | } |
Paul Walmsley | 6b8858a | 2008-03-18 10:35:15 +0200 | [diff] [blame] | 314 | |
Paul Walmsley | 74be842 | 2010-02-22 22:09:29 -0700 | [diff] [blame] | 315 | /** |
| 316 | * omap_clk_get_by_name - locate OMAP struct clk by its name |
| 317 | * @name: name of the struct clk to locate |
| 318 | * |
| 319 | * Locate an OMAP struct clk by its name. Assumes that struct clk |
| 320 | * names are unique. Returns NULL if not found or a pointer to the |
| 321 | * struct clk if found. |
| 322 | */ |
| 323 | struct clk *omap_clk_get_by_name(const char *name) |
| 324 | { |
| 325 | struct clk *c; |
| 326 | struct clk *ret = NULL; |
| 327 | |
| 328 | mutex_lock(&clocks_mutex); |
| 329 | |
| 330 | list_for_each_entry(c, &clocks, node) { |
| 331 | if (!strcmp(c->name, name)) { |
| 332 | ret = c; |
| 333 | break; |
| 334 | } |
| 335 | } |
| 336 | |
| 337 | mutex_unlock(&clocks_mutex); |
| 338 | |
| 339 | return ret; |
| 340 | } |
| 341 | |
Rajendra Nayak | 58e846f | 2011-02-25 15:49:00 -0700 | [diff] [blame] | 342 | int omap_clk_enable_autoidle_all(void) |
| 343 | { |
| 344 | struct clk *c; |
| 345 | unsigned long flags; |
| 346 | |
| 347 | spin_lock_irqsave(&clockfw_lock, flags); |
| 348 | |
| 349 | list_for_each_entry(c, &clocks, node) |
| 350 | if (c->ops->allow_idle) |
| 351 | c->ops->allow_idle(c); |
| 352 | |
| 353 | spin_unlock_irqrestore(&clockfw_lock, flags); |
| 354 | |
| 355 | return 0; |
| 356 | } |
| 357 | |
| 358 | int omap_clk_disable_autoidle_all(void) |
| 359 | { |
| 360 | struct clk *c; |
| 361 | unsigned long flags; |
| 362 | |
| 363 | spin_lock_irqsave(&clockfw_lock, flags); |
| 364 | |
| 365 | list_for_each_entry(c, &clocks, node) |
| 366 | if (c->ops->deny_idle) |
| 367 | c->ops->deny_idle(c); |
| 368 | |
| 369 | spin_unlock_irqrestore(&clockfw_lock, flags); |
| 370 | |
| 371 | return 0; |
| 372 | } |
| 373 | |
Russell King | 897dcde | 2008-11-04 16:35:03 +0000 | [diff] [blame] | 374 | /* |
| 375 | * Low level helpers |
| 376 | */ |
| 377 | static int clkll_enable_null(struct clk *clk) |
| 378 | { |
| 379 | return 0; |
| 380 | } |
| 381 | |
| 382 | static void clkll_disable_null(struct clk *clk) |
| 383 | { |
| 384 | } |
| 385 | |
| 386 | const struct clkops clkops_null = { |
| 387 | .enable = clkll_enable_null, |
| 388 | .disable = clkll_disable_null, |
| 389 | }; |
| 390 | |
/*
 * Dummy clock
 *
 * Used for clock aliases that are needed on some OMAPs, but not others.
 * Uses clkops_null, so enabling/disabling it is a harmless no-op.
 */
struct clk dummy_ck = {
	.name	= "dummy",
	.ops	= &clkops_null,
};
| 400 | |
Paul Walmsley | 6b8858a | 2008-03-18 10:35:15 +0200 | [diff] [blame] | 401 | #ifdef CONFIG_CPU_FREQ |
| 402 | void clk_init_cpufreq_table(struct cpufreq_frequency_table **table) |
| 403 | { |
| 404 | unsigned long flags; |
| 405 | |
Paul Walmsley | e07f469 | 2011-02-16 15:38:38 -0700 | [diff] [blame] | 406 | if (!arch_clock || !arch_clock->clk_init_cpufreq_table) |
| 407 | return; |
| 408 | |
Paul Walmsley | 6b8858a | 2008-03-18 10:35:15 +0200 | [diff] [blame] | 409 | spin_lock_irqsave(&clockfw_lock, flags); |
Paul Walmsley | e07f469 | 2011-02-16 15:38:38 -0700 | [diff] [blame] | 410 | arch_clock->clk_init_cpufreq_table(table); |
Paul Walmsley | 6b8858a | 2008-03-18 10:35:15 +0200 | [diff] [blame] | 411 | spin_unlock_irqrestore(&clockfw_lock, flags); |
| 412 | } |
Paul Walmsley | 4e37c10 | 2010-01-08 15:23:16 -0700 | [diff] [blame] | 413 | |
| 414 | void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table) |
| 415 | { |
| 416 | unsigned long flags; |
| 417 | |
Paul Walmsley | e07f469 | 2011-02-16 15:38:38 -0700 | [diff] [blame] | 418 | if (!arch_clock || !arch_clock->clk_exit_cpufreq_table) |
| 419 | return; |
| 420 | |
Paul Walmsley | 4e37c10 | 2010-01-08 15:23:16 -0700 | [diff] [blame] | 421 | spin_lock_irqsave(&clockfw_lock, flags); |
Paul Walmsley | e07f469 | 2011-02-16 15:38:38 -0700 | [diff] [blame] | 422 | arch_clock->clk_exit_cpufreq_table(table); |
Paul Walmsley | 4e37c10 | 2010-01-08 15:23:16 -0700 | [diff] [blame] | 423 | spin_unlock_irqrestore(&clockfw_lock, flags); |
| 424 | } |
Paul Walmsley | 6b8858a | 2008-03-18 10:35:15 +0200 | [diff] [blame] | 425 | #endif |
| 426 | |
/*
 * Clock init and cleanup
 */
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 430 | |
#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
	struct clk *ck;
	unsigned long flags;

	if (!arch_clock || !arch_clock->clk_disable_unused)
		return 0;

	pr_info("clock: disabling unused clocks to save power\n");

	list_for_each_entry(ck, &clocks, node) {
		/* Skip ungated clocks, in-use clocks, and clocks with
		 * no enable register to poke. */
		if (ck->ops == &clkops_null || ck->usecount > 0 ||
		    !ck->enable_reg)
			continue;

		spin_lock_irqsave(&clockfw_lock, flags);
		arch_clock->clk_disable_unused(ck);
		spin_unlock_irqrestore(&clockfw_lock, flags);
	}

	return 0;
}
late_initcall(clk_disable_unused);
late_initcall(omap_clk_enable_autoidle_all);
#endif
| 461 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 462 | int __init clk_init(struct clk_functions * custom_clocks) |
| 463 | { |
| 464 | if (!custom_clocks) { |
Paul Walmsley | 6041c27 | 2010-10-08 11:40:20 -0600 | [diff] [blame] | 465 | pr_err("No custom clock functions registered\n"); |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 466 | BUG(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 467 | } |
| 468 | |
Tony Lindgren | 1a8bfa1 | 2005-11-10 14:26:50 +0000 | [diff] [blame] | 469 | arch_clock = custom_clocks; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 470 | |
| 471 | return 0; |
| 472 | } |
Paul Walmsley | 6b8858a | 2008-03-18 10:35:15 +0200 | [diff] [blame] | 473 | |
Hiroshi DOYU | 137b3ee | 2008-07-03 12:24:41 +0300 | [diff] [blame] | 474 | #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) |
| 475 | /* |
| 476 | * debugfs support to trace clock tree hierarchy and attributes |
| 477 | */ |
| 478 | static struct dentry *clk_debugfs_root; |
| 479 | |
| 480 | static int clk_debugfs_register_one(struct clk *c) |
| 481 | { |
| 482 | int err; |
Marek Skuczynski | 0825cc8 | 2010-01-31 10:00:54 +0000 | [diff] [blame] | 483 | struct dentry *d, *child, *child_tmp; |
Hiroshi DOYU | 137b3ee | 2008-07-03 12:24:41 +0300 | [diff] [blame] | 484 | struct clk *pa = c->parent; |
| 485 | char s[255]; |
| 486 | char *p = s; |
| 487 | |
| 488 | p += sprintf(p, "%s", c->name); |
Hiroshi DOYU | 137b3ee | 2008-07-03 12:24:41 +0300 | [diff] [blame] | 489 | d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root); |
Zhaolei | e621f26 | 2008-11-04 13:35:07 -0800 | [diff] [blame] | 490 | if (!d) |
| 491 | return -ENOMEM; |
Hiroshi DOYU | 137b3ee | 2008-07-03 12:24:41 +0300 | [diff] [blame] | 492 | c->dent = d; |
| 493 | |
| 494 | d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount); |
Zhaolei | e621f26 | 2008-11-04 13:35:07 -0800 | [diff] [blame] | 495 | if (!d) { |
| 496 | err = -ENOMEM; |
Hiroshi DOYU | 137b3ee | 2008-07-03 12:24:41 +0300 | [diff] [blame] | 497 | goto err_out; |
| 498 | } |
| 499 | d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate); |
Zhaolei | e621f26 | 2008-11-04 13:35:07 -0800 | [diff] [blame] | 500 | if (!d) { |
| 501 | err = -ENOMEM; |
Hiroshi DOYU | 137b3ee | 2008-07-03 12:24:41 +0300 | [diff] [blame] | 502 | goto err_out; |
| 503 | } |
| 504 | d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags); |
Zhaolei | e621f26 | 2008-11-04 13:35:07 -0800 | [diff] [blame] | 505 | if (!d) { |
| 506 | err = -ENOMEM; |
Hiroshi DOYU | 137b3ee | 2008-07-03 12:24:41 +0300 | [diff] [blame] | 507 | goto err_out; |
| 508 | } |
| 509 | return 0; |
| 510 | |
| 511 | err_out: |
| 512 | d = c->dent; |
Marek Skuczynski | 0825cc8 | 2010-01-31 10:00:54 +0000 | [diff] [blame] | 513 | list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child) |
Hiroshi DOYU | 137b3ee | 2008-07-03 12:24:41 +0300 | [diff] [blame] | 514 | debugfs_remove(child); |
| 515 | debugfs_remove(c->dent); |
| 516 | return err; |
| 517 | } |
| 518 | |
| 519 | static int clk_debugfs_register(struct clk *c) |
| 520 | { |
| 521 | int err; |
| 522 | struct clk *pa = c->parent; |
| 523 | |
| 524 | if (pa && !pa->dent) { |
| 525 | err = clk_debugfs_register(pa); |
| 526 | if (err) |
| 527 | return err; |
| 528 | } |
| 529 | |
| 530 | if (!c->dent) { |
| 531 | err = clk_debugfs_register_one(c); |
| 532 | if (err) |
| 533 | return err; |
| 534 | } |
| 535 | return 0; |
| 536 | } |
| 537 | |
| 538 | static int __init clk_debugfs_init(void) |
| 539 | { |
| 540 | struct clk *c; |
| 541 | struct dentry *d; |
| 542 | int err; |
| 543 | |
| 544 | d = debugfs_create_dir("clock", NULL); |
Zhaolei | e621f26 | 2008-11-04 13:35:07 -0800 | [diff] [blame] | 545 | if (!d) |
| 546 | return -ENOMEM; |
Hiroshi DOYU | 137b3ee | 2008-07-03 12:24:41 +0300 | [diff] [blame] | 547 | clk_debugfs_root = d; |
| 548 | |
| 549 | list_for_each_entry(c, &clocks, node) { |
| 550 | err = clk_debugfs_register(c); |
| 551 | if (err) |
| 552 | goto err_out; |
| 553 | } |
| 554 | return 0; |
| 555 | err_out: |
Hiroshi DOYU | ca4caa4 | 2009-09-03 20:14:06 +0300 | [diff] [blame] | 556 | debugfs_remove_recursive(clk_debugfs_root); |
Hiroshi DOYU | 137b3ee | 2008-07-03 12:24:41 +0300 | [diff] [blame] | 557 | return err; |
| 558 | } |
| 559 | late_initcall(clk_debugfs_init); |
| 560 | |
| 561 | #endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */ |