/* arch/arm/mach-msm/clock.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/list.h>
#include <trace/events/power.h>
#include <mach/clk-provider.h>
#include "clock.h"

struct handoff_clk {
	struct list_head list;
	struct clk *clk;
};
static LIST_HEAD(handoff_list);

/* Find the voltage level required for a given rate. */
int find_vdd_level(struct clk *clk, unsigned long rate)
{
	int level;

	for (level = 0; level < ARRAY_SIZE(clk->fmax); level++)
		if (rate <= clk->fmax[level])
			break;

	if (level == ARRAY_SIZE(clk->fmax)) {
		pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
			clk->dbg_name);
		return -EINVAL;
	}

	return level;
}

/* Update voltage level given the current votes. */
static int update_vdd(struct clk_vdd_class *vdd_class)
{
	int level, rc;

	for (level = ARRAY_SIZE(vdd_class->level_votes)-1; level > 0; level--)
		if (vdd_class->level_votes[level])
			break;

	if (level == vdd_class->cur_level)
		return 0;

	rc = vdd_class->set_vdd(vdd_class, level);
	if (!rc)
		vdd_class->cur_level = level;

	return rc;
}
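
/*
 * Illustrative sketch only -- the fmax values below are hypothetical and
 * not taken from any real clock.  With fmax[] = { 0, 100000000, 200000000 }
 * the helpers above behave as follows:
 *
 *	find_vdd_level(clk,  50000000)	-> 1       (50 MHz fits under fmax[1])
 *	find_vdd_level(clk, 150000000)	-> 2       (needs fmax[2])
 *	find_vdd_level(clk, 250000000)	-> -EINVAL (above the highest Fmax)
 *
 * update_vdd() then picks the highest level with a non-zero vote count
 * (falling back to level 0 when nothing is voted) and calls set_vdd()
 * only when that level differs from cur_level.
 */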

/* Vote for a voltage level. */
int vote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&vdd_class->lock, flags);
	vdd_class->level_votes[level]++;
	rc = update_vdd(vdd_class);
	if (rc)
		vdd_class->level_votes[level]--;
	spin_unlock_irqrestore(&vdd_class->lock, flags);

	return rc;
}

/* Remove vote for a voltage level. */
int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&vdd_class->lock, flags);
	if (WARN(!vdd_class->level_votes[level],
			"Reference counts are incorrect for %s level %d\n",
			vdd_class->class_name, level))
		goto out;
	vdd_class->level_votes[level]--;
	rc = update_vdd(vdd_class);
	if (rc)
		vdd_class->level_votes[level]++;
out:
	spin_unlock_irqrestore(&vdd_class->lock, flags);
	return rc;
}

/* Vote for a voltage level corresponding to a clock's rate. */
static int vote_rate_vdd(struct clk *clk, unsigned long rate)
{
	int level;

	if (!clk->vdd_class)
		return 0;

	level = find_vdd_level(clk, rate);
	if (level < 0)
		return level;

	return vote_vdd_level(clk->vdd_class, level);
}

/* Remove vote for a voltage level corresponding to a clock's rate. */
static void unvote_rate_vdd(struct clk *clk, unsigned long rate)
{
	int level;

	if (!clk->vdd_class)
		return;

	level = find_vdd_level(clk, rate);
	if (level < 0)
		return;

	unvote_vdd_level(clk->vdd_class, level);
}

int clk_prepare(struct clk *clk)
{
	int ret = 0;
	struct clk *parent;

	if (!clk)
		return 0;
	if (IS_ERR(clk))
		return -EINVAL;

	mutex_lock(&clk->prepare_lock);
	if (clk->prepare_count == 0) {
		parent = clk_get_parent(clk);

		ret = clk_prepare(parent);
		if (ret)
			goto out;
		ret = clk_prepare(clk->depends);
		if (ret)
			goto err_prepare_depends;

		ret = vote_rate_vdd(clk, clk->rate);
		if (ret)
			goto err_vote_vdd;
		if (clk->ops->prepare)
			ret = clk->ops->prepare(clk);
		if (ret)
			goto err_prepare_clock;
	}
	clk->prepare_count++;
out:
	mutex_unlock(&clk->prepare_lock);
	return ret;
err_prepare_clock:
	unvote_rate_vdd(clk, clk->rate);
err_vote_vdd:
	clk_unprepare(clk->depends);
err_prepare_depends:
	clk_unprepare(parent);
	goto out;
}
EXPORT_SYMBOL(clk_prepare);

/*
 * Standard clock functions defined in include/linux/clk.h
 */
int clk_enable(struct clk *clk)
{
	int ret = 0;
	unsigned long flags;
	struct clk *parent;
	const char *name = clk ? clk->dbg_name : NULL;

	if (!clk)
		return 0;
	if (IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clk->lock, flags);
	WARN(!clk->prepare_count,
			"%s: Don't call enable on unprepared clocks\n", name);
	if (clk->count == 0) {
		parent = clk_get_parent(clk);

		ret = clk_enable(parent);
		if (ret)
			goto err_enable_parent;
		ret = clk_enable(clk->depends);
		if (ret)
			goto err_enable_depends;

		trace_clock_enable(name, 1, smp_processor_id());
		if (clk->ops->enable)
			ret = clk->ops->enable(clk);
		if (ret)
			goto err_enable_clock;
	}
	clk->count++;
	spin_unlock_irqrestore(&clk->lock, flags);

	return 0;

err_enable_clock:
	clk_disable(clk->depends);
err_enable_depends:
	clk_disable(parent);
err_enable_parent:
	spin_unlock_irqrestore(&clk->lock, flags);
	return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	const char *name = clk ? clk->dbg_name : NULL;
	unsigned long flags;

	if (IS_ERR_OR_NULL(clk))
		return;

	spin_lock_irqsave(&clk->lock, flags);
	WARN(!clk->prepare_count,
			"%s: Never called prepare or calling disable after unprepare\n",
			name);
	if (WARN(clk->count == 0, "%s is unbalanced", name))
		goto out;
	if (clk->count == 1) {
		struct clk *parent = clk_get_parent(clk);

		trace_clock_disable(name, 0, smp_processor_id());
		if (clk->ops->disable)
			clk->ops->disable(clk);
		clk_disable(clk->depends);
		clk_disable(parent);
	}
	clk->count--;
out:
	spin_unlock_irqrestore(&clk->lock, flags);
}
EXPORT_SYMBOL(clk_disable);

void clk_unprepare(struct clk *clk)
{
	const char *name = clk ? clk->dbg_name : NULL;

	if (IS_ERR_OR_NULL(clk))
		return;

	mutex_lock(&clk->prepare_lock);
	if (WARN(!clk->prepare_count, "%s is unbalanced (prepare)", name))
		goto out;
	if (clk->prepare_count == 1) {
		struct clk *parent = clk_get_parent(clk);

		WARN(clk->count,
			"%s: Don't call unprepare when the clock is enabled\n",
			name);

		if (clk->ops->unprepare)
			clk->ops->unprepare(clk);
		unvote_rate_vdd(clk, clk->rate);
		clk_unprepare(clk->depends);
		clk_unprepare(parent);
	}
	clk->prepare_count--;
out:
	mutex_unlock(&clk->prepare_lock);
}
EXPORT_SYMBOL(clk_unprepare);

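/*
 * Illustrative consumer-side sketch (not part of this driver): a device
 * driver using the clk_prepare()/clk_enable() pair above would typically
 * do the following for a hypothetical clock registered as "core_clk":
 *
 *	struct clk *clk = clk_get(&pdev->dev, "core_clk");
 *
 *	ret = clk_prepare(clk);		// may sleep: votes vdd, preps parents
 *	if (!ret)
 *		ret = clk_enable(clk);	// atomic: safe from non-sleeping context
 *	...
 *	clk_disable(clk);
 *	clk_unprepare(clk);
 *	clk_put(clk);
 *
 * The clk_prepare_enable()/clk_disable_unprepare() helpers wrap these pairs.
 */
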
int clk_reset(struct clk *clk, enum clk_reset_action action)
{
	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;

	if (!clk->ops->reset)
		return -ENOSYS;

	return clk->ops->reset(clk, action);
}
EXPORT_SYMBOL(clk_reset);

unsigned long clk_get_rate(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return 0;

	if (!clk->ops->get_rate)
		return clk->rate;

	return clk->ops->get_rate(clk);
}
EXPORT_SYMBOL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long start_rate;
	int rc = 0;
	const char *name = clk ? clk->dbg_name : NULL;

	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;

	if (!clk->ops->set_rate)
		return -ENOSYS;

	mutex_lock(&clk->prepare_lock);

	/* Return early if the rate isn't going to change */
	if (clk->rate == rate)
		goto out;

	trace_clock_set_rate(name, rate, raw_smp_processor_id());
	if (clk->prepare_count) {
		start_rate = clk->rate;
		/* Enforce vdd requirements for target frequency. */
		rc = vote_rate_vdd(clk, rate);
		if (rc)
			goto err_vote_vdd;
		rc = clk->ops->set_rate(clk, rate);
		if (rc)
			goto err_set_rate;
		/* Release vdd requirements for starting frequency. */
		unvote_rate_vdd(clk, start_rate);
	} else {
		rc = clk->ops->set_rate(clk, rate);
	}

	if (!rc)
		clk->rate = rate;
out:
	mutex_unlock(&clk->prepare_lock);
	return rc;

err_set_rate:
	unvote_rate_vdd(clk, rate);
err_vote_vdd:
	goto out;
}
EXPORT_SYMBOL(clk_set_rate);

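/*
 * Illustrative sketch only: a consumer commonly rounds a requested rate
 * before programming it, e.g. for a hypothetical 19.2 MHz request:
 *
 *	long r = clk_round_rate(clk, 19200000);
 *	if (r > 0)
 *		ret = clk_set_rate(clk, r);
 *
 * While the clock is prepared, clk_set_rate() above votes for the vdd
 * level of the new rate before switching and only drops the vote for the
 * old rate afterwards, so the rail never dips below what either rate
 * requires during the transition.
 */
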
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;

	if (!clk->ops->round_rate)
		return -ENOSYS;

	return clk->ops->round_rate(clk, rate);
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;

	if (!clk->ops->set_max_rate)
		return -ENOSYS;

	return clk->ops->set_max_rate(clk, rate);
}
EXPORT_SYMBOL(clk_set_max_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	if (!clk->ops->set_parent)
		return 0;

	return clk->ops->set_parent(clk, parent);
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return NULL;

	if (!clk->ops->get_parent)
		return NULL;

	return clk->ops->get_parent(clk);
}
EXPORT_SYMBOL(clk_get_parent);

int clk_set_flags(struct clk *clk, unsigned long flags)
{
	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;
	if (!clk->ops->set_flags)
		return -ENOSYS;

	return clk->ops->set_flags(clk, flags);
}
EXPORT_SYMBOL(clk_set_flags);

static struct clock_init_data *clk_init_data;

/**
 * msm_clock_register() - Register additional clock tables
 * @table: Table of clocks
 * @size: Size of @table
 *
 * Upon return, clock APIs may be used to control clocks registered using this
 * function. This API may only be used after msm_clock_init() has completed.
 * Unlike msm_clock_init(), this function may be called multiple times with
 * different clock lists and used after the kernel has finished booting.
 */
int msm_clock_register(struct clk_lookup *table, size_t size)
{
	if (!clk_init_data)
		return -ENODEV;

	if (!table)
		return -EINVAL;

	clkdev_add_table(table, size);
	clock_debug_register(table, size);

	return 0;
}
EXPORT_SYMBOL(msm_clock_register);

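/*
 * Illustrative sketch only: a board file or device driver could hand extra
 * clocks to this API with an ordinary clkdev lookup table.  The device,
 * clock, and table names below are hypothetical, as is the assumption that
 * the provider's clock object embeds its struct clk in a member named "c":
 *
 *	static struct clk_lookup foo_clocks[] = {
 *		{ .dev_id = "foo-device.0", .con_id = "core_clk",
 *		  .clk = &foo_core_clk.c },
 *	};
 *
 *	ret = msm_clock_register(foo_clocks, ARRAY_SIZE(foo_clocks));
 */
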
static enum handoff __init __handoff_clk(struct clk *clk)
{
	enum handoff ret;
	struct handoff_clk *h;
	unsigned long rate;
	int err = 0;

	/*
	 * Tree roots don't have parents, but need to be handed off. So,
	 * terminate recursion by returning "enabled". Also return "enabled"
	 * for clocks with non-zero enable counts since they must have already
	 * been handed off.
	 */
	if (clk == NULL || clk->count)
		return HANDOFF_ENABLED_CLK;

	/* Clocks without handoff functions are assumed to be disabled. */
	if (!clk->ops->handoff || (clk->flags & CLKFLAG_SKIP_HANDOFF))
		return HANDOFF_DISABLED_CLK;

	/*
	 * Handoff functions for children must be called before their parents'
	 * so that the correct parent is returned by the clk_get_parent() below.
	 */
	ret = clk->ops->handoff(clk);
	if (ret == HANDOFF_ENABLED_CLK) {
		ret = __handoff_clk(clk_get_parent(clk));
		if (ret == HANDOFF_ENABLED_CLK) {
			h = kmalloc(sizeof(*h), GFP_KERNEL);
			if (!h) {
				err = -ENOMEM;
				goto out;
			}
			err = clk_prepare_enable(clk);
			if (err)
				goto out;
			rate = clk_get_rate(clk);
			if (rate)
				pr_debug("%s rate=%lu\n", clk->dbg_name, rate);
			h->clk = clk;
			list_add_tail(&h->list, &handoff_list);
		}
	}
out:
	if (err) {
		pr_err("%s handoff failed (%d)\n", clk->dbg_name, err);
		kfree(h);
		ret = HANDOFF_DISABLED_CLK;
	}
	return ret;
}

/**
 * msm_clock_init() - Register and initialize a clock driver
 * @data: Driver-specific clock initialization data
 *
 * Upon return from this call, clock APIs may be used to control
 * clocks registered with this API.
 */
int __init msm_clock_init(struct clock_init_data *data)
{
	unsigned n;
	struct clk_lookup *clock_tbl;
	size_t num_clocks;
	struct clk *clk;

	if (!data)
		return -EINVAL;

	clk_init_data = data;
	if (clk_init_data->pre_init)
		clk_init_data->pre_init();

	clock_tbl = data->table;
	num_clocks = data->size;

	for (n = 0; n < num_clocks; n++) {
		struct clk *parent;
		clk = clock_tbl[n].clk;
		parent = clk_get_parent(clk);
		if (parent && list_empty(&clk->siblings))
			list_add(&clk->siblings, &parent->children);
	}

	/*
	 * Detect and preserve initial clock state until clock_late_init() or
	 * a driver explicitly changes it, whichever is first.
	 */
	for (n = 0; n < num_clocks; n++)
		__handoff_clk(clock_tbl[n].clk);

	clkdev_add_table(clock_tbl, num_clocks);

	if (clk_init_data->post_init)
		clk_init_data->post_init();

	clock_debug_init();
	clock_debug_register(clock_tbl, num_clocks);

	return 0;
}

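/*
 * Illustrative sketch only: a target's clock driver would typically pass
 * a structure like the following to msm_clock_init() from its board code.
 * The table and callback names here are hypothetical.
 *
 *	struct clock_init_data foo_clock_init_data __initdata = {
 *		.table = foo_clock_tbl,
 *		.size = ARRAY_SIZE(foo_clock_tbl),
 *		.pre_init = foo_clock_pre_init,
 *		.post_init = foo_clock_post_init,
 *		.late_init = foo_clock_late_init,
 *	};
 *
 * The three callbacks are only invoked when non-NULL: pre_init and
 * post_init around registration above, late_init from clock_late_init()
 * below.
 */
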
static int __init clock_late_init(void)
{
	struct handoff_clk *h, *h_temp;
	int ret = 0;

	pr_info("%s: Removing enables held for handed-off clocks\n", __func__);
	list_for_each_entry_safe(h, h_temp, &handoff_list, list) {
		clk_disable_unprepare(h->clk);
		list_del(&h->list);
		kfree(h);
	}

	if (clk_init_data->late_init)
		ret = clk_init_data->late_init();
	return ret;
}
late_initcall(clock_late_init);