/*
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/errno.h>

#include <linux/clk.h>
#include <mach/clk-provider.h>
#include <mach/clock-generic.h>

/* ==================== Mux clock ==================== */

static int parent_to_src_sel(struct mux_clk *mux, struct clk *p)
{
	int i;

	for (i = 0; i < mux->num_parents; i++) {
		if (mux->parents[i].src == p)
			return mux->parents[i].sel;
	}

	return -EINVAL;
}
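
/*
 * Switch clock c over to parent p. This assumes the usual MSM clock
 * framework contract for the helpers: __clk_pre_reparent() votes for
 * (prepares/enables) the incoming parent as needed and takes the clock's
 * lock so the selector flips atomically, and __clk_post_reparent() drops
 * the lock and the vote on the outgoing parent. That is why the failure
 * path "posts" against p itself: it releases the vote taken on p by the
 * pre hook.
 */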
static int mux_set_parent(struct clk *c, struct clk *p)
{
	struct mux_clk *mux = to_mux_clk(c);
	int sel = parent_to_src_sel(mux, p);
	struct clk *old_parent;
	int rc = 0;
	unsigned long flags;

	if (sel < 0)
		return sel;

	rc = __clk_pre_reparent(c, p, &flags);
	if (rc)
		goto out;

	rc = mux->ops->set_mux_sel(mux, sel);
	if (rc)
		goto set_fail;

	old_parent = c->parent;
	c->parent = p;
	__clk_post_reparent(c, old_parent, &flags);

	return 0;

set_fail:
	__clk_post_reparent(c, p, &flags);
out:
	return rc;
}
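
/*
 * Round to the lowest parent rate that is at or above the requested rate.
 * If no parent can reach the request, fall back to the highest rate below
 * it. Example (hypothetical fixed-rate parents): with parents at 200, 300
 * and 500 MHz, a request for 250 MHz rounds to 300 MHz; a request for
 * 600 MHz falls back to 500 MHz.
 */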
static long mux_round_rate(struct clk *c, unsigned long rate)
{
	struct mux_clk *mux = to_mux_clk(c);
	int i;
	long prate, max_prate = 0, rrate = LONG_MAX;

	for (i = 0; i < mux->num_parents; i++) {
		prate = clk_round_rate(mux->parents[i].src, rate);
		/*
		 * Skip parents that can't round this rate at all. Without
		 * this check, a negative error code would silently win the
		 * signed/unsigned comparison below and be returned as a
		 * "rate".
		 */
		if (prate < 0)
			continue;
		if (prate < rate) {
			max_prate = max(prate, max_prate);
			continue;
		}

		rrate = min(rrate, prate);
	}
	if (rrate == LONG_MAX)
		rrate = max_prate;

	return rrate ? rrate : -EINVAL;
}
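
/*
 * Pick the first parent that can generate the requested rate exactly,
 * park the mux on the safe parent, set the new parent's rate, then
 * switch over. On failure, the new parent's previous rate and the
 * original mux selection are restored.
 */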
static int mux_set_rate(struct clk *c, unsigned long rate)
{
	struct mux_clk *mux = to_mux_clk(c);
	struct clk *new_parent = NULL;
	int rc = 0, i;
	unsigned long new_par_curr_rate;

	for (i = 0; i < mux->num_parents; i++) {
		if (clk_round_rate(mux->parents[i].src, rate) == rate) {
			new_parent = mux->parents[i].src;
			break;
		}
	}
	if (new_parent == NULL)
		return -EINVAL;

	/*
	 * Switch to safe parent since the old and new parent might be the
	 * same and the parent might temporarily turn off while switching
	 * rates.
	 */
	if (mux->safe_sel >= 0)
		rc = mux->ops->set_mux_sel(mux, mux->safe_sel);
	if (rc)
		return rc;

	new_par_curr_rate = clk_get_rate(new_parent);
	rc = clk_set_rate(new_parent, rate);
	if (rc)
		goto set_rate_fail;

	rc = mux_set_parent(c, new_parent);
	if (rc)
		goto set_par_fail;

	return 0;

set_par_fail:
	clk_set_rate(new_parent, new_par_curr_rate);
set_rate_fail:
	WARN(mux->ops->set_mux_sel(mux, parent_to_src_sel(mux, c->parent)),
		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
	return rc;
}

static int mux_enable(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);
	if (mux->ops->enable)
		return mux->ops->enable(mux);
	return 0;
}

static void mux_disable(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);
	if (mux->ops->disable)
		return mux->ops->disable(mux);
}

static struct clk *mux_get_parent(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);
	int sel = mux->ops->get_mux_sel(mux);
	int i;

	for (i = 0; i < mux->num_parents; i++) {
		if (mux->parents[i].sel == sel)
			return mux->parents[i].src;
	}

	/* Unfamiliar parent. */
	return NULL;
}

static enum handoff mux_handoff(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);

	c->rate = clk_get_rate(c->parent);
	mux->safe_sel = parent_to_src_sel(mux, mux->safe_parent);

	if (mux->en_mask && mux->ops && mux->ops->is_enabled)
		return mux->ops->is_enabled(mux)
			? HANDOFF_ENABLED_CLK
			: HANDOFF_DISABLED_CLK;

	/*
	 * If this function returns 'enabled' even when the clock downstream
	 * of this clock is disabled, then handoff code will unnecessarily
	 * enable the current parent of this clock. If this function always
	 * returns 'disabled' and a clock downstream is on, the clock handoff
	 * code will bump up the ref count for this clock and its current
	 * parent as necessary. So, clocks without an actual HW gate can
	 * always return disabled.
	 */
	return HANDOFF_DISABLED_CLK;
}

struct clk_ops clk_ops_gen_mux = {
	.enable = mux_enable,
	.disable = mux_disable,
	.set_parent = mux_set_parent,
	.round_rate = mux_round_rate,
	.set_rate = mux_set_rate,
	.handoff = mux_handoff,
	.get_parent = mux_get_parent,
};
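
/*
 * Illustrative usage only (not part of the original file): a two-parent
 * mux driven by these generic ops. The parent clocks (pll0, pll1, cxo)
 * and the register-banging mux_ops implementation (my_mux_ops) are
 * hypothetical; field names follow their usage above, and any lock/list
 * initialization boilerplate the framework requires is omitted.
 *
 *	static struct mux_clk my_mux = {
 *		.num_parents = 2,
 *		.parents = (struct clk_src[]) {
 *			{ .src = &pll0.c, .sel = 0 },
 *			{ .src = &pll1.c, .sel = 1 },
 *		},
 *		.safe_parent = &cxo.c,
 *		.ops = &my_mux_ops,
 *		.c = {
 *			.dbg_name = "my_mux_clk",
 *			.ops = &clk_ops_gen_mux,
 *		},
 *	};
 */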

/* ==================== Divider clock ==================== */
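
/*
 * Walk the divider range from small to large, asking the parent to round
 * rate * div each time, and keep the candidate that lands closest to (but
 * not below) the request. Worked example (hypothetical parent, rate_margin
 * of 0): if the parent rounds requests up to multiples of 100 MHz and
 * 150 MHz is requested, div = 1 yields 200 MHz while div = 2 yields
 * 300 / 2 = 150 MHz, an exact match, so the search stops there.
 */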
static long __div_round_rate(struct clk *c, unsigned long rate, int *best_div)
{
	struct div_clk *d = to_div_clk(c);
	unsigned int div, min_div, max_div;
	long p_rrate, rrate = LONG_MAX;

	rate = max(rate, 1UL);

	if (!d->ops || !d->ops->set_div) {
		min_div = max_div = d->div;
	} else {
		min_div = max(d->min_div, 1U);
		max_div = min(d->max_div, (unsigned int) (LONG_MAX / rate));
	}

	for (div = min_div; div <= max_div; div++) {
		p_rrate = clk_round_rate(c->parent, rate * div);
		if (p_rrate < 0)
			break;

		p_rrate /= div;
		/*
		 * Trying a higher divider only asks the parent for a higher
		 * rate. If the parent can't provide a rate high enough for
		 * this divider, it can't provide the even higher rate a
		 * bigger divider would need, so stop trying dividers here.
		 */
		if (p_rrate < rate) {
			if (rrate == LONG_MAX) {
				rrate = p_rrate;
				if (best_div)
					*best_div = div;
			}
			break;
		}
		if (p_rrate < rrate) {
			rrate = p_rrate;
			if (best_div)
				*best_div = div;
		}

		if (rrate <= rate + d->rate_margin)
			break;
	}

	if (rrate == LONG_MAX)
		return -EINVAL;

	return rrate;
}

static long div_round_rate(struct clk *c, unsigned long rate)
{
	return __div_round_rate(c, rate, NULL);
}
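
/*
 * Program the divider before raising the parent rate and after lowering
 * it: when the divider grows, it is set before the parent speeds up, and
 * when it shrinks, it is set only after the parent has changed. This
 * ordering ensures the output never temporarily runs faster than the
 * higher of the old and new rates during the transition.
 */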
static int div_set_rate(struct clk *c, unsigned long rate)
{
	struct div_clk *d = to_div_clk(c);
	int div, rc = 0;
	long rrate, old_prate;

	rrate = __div_round_rate(c, rate, &div);
	if (rrate != rate)
		return -EINVAL;

	if (div > d->div)
		rc = d->ops->set_div(d, div);
	if (rc)
		return rc;

	old_prate = clk_get_rate(c->parent);
	rc = clk_set_rate(c->parent, rate * div);
	if (rc)
		goto set_rate_fail;

	if (div < d->div)
		rc = d->ops->set_div(d, div);
	if (rc)
		goto div_dec_fail;

	d->div = div;

	return 0;

div_dec_fail:
	WARN(clk_set_rate(c->parent, old_prate),
		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
set_rate_fail:
	if (div > d->div)
		WARN(d->ops->set_div(d, d->div),
			"Set rate failed for %s. Also in bad state!\n",
			c->dbg_name);
	return rc;
}

static int div_enable(struct clk *c)
{
	struct div_clk *d = to_div_clk(c);
	/* d->ops may be NULL for fixed dividers; see __div_round_rate(). */
	if (d->ops && d->ops->enable)
		return d->ops->enable(d);
	return 0;
}

static void div_disable(struct clk *c)
{
	struct div_clk *d = to_div_clk(c);
	if (d->ops && d->ops->disable)
		return d->ops->disable(d);
}

static enum handoff div_handoff(struct clk *c)
{
	struct div_clk *d = to_div_clk(c);

	if (d->ops && d->ops->get_div)
		d->div = max(d->ops->get_div(d), 1);
	d->div = max(d->div, 1U);
	c->rate = clk_get_rate(c->parent) / d->div;

	if (d->en_mask && d->ops && d->ops->is_enabled)
		return d->ops->is_enabled(d)
			? HANDOFF_ENABLED_CLK
			: HANDOFF_DISABLED_CLK;

	/*
	 * See the comment in mux_handoff(): clocks without an actual HW
	 * gate can always return 'disabled' here, and the handoff code
	 * will still bump the ref counts of this clock and its current
	 * parent when a downstream clock is found to be on.
	 */
	return HANDOFF_DISABLED_CLK;
}

struct clk_ops clk_ops_div = {
	.enable = div_enable,
	.disable = div_disable,
	.round_rate = div_round_rate,
	.set_rate = div_set_rate,
	.handoff = div_handoff,
};
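
/*
 * A "slave" divider cannot change its parent's rate; it only picks the
 * divider that best approximates the requested rate given the parent's
 * current rate. The result is the closest achievable rate at or above
 * the request that the clamped divider range allows. Example (rates
 * hypothetical): with the parent fixed at 800 MHz, a request for
 * 300 MHz picks div = 2 and rounds to 400 MHz, while a request for
 * 200 MHz picks div = 4 and matches exactly.
 */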
static long __slave_div_round_rate(struct clk *c, unsigned long rate,
					int *best_div)
{
	struct div_clk *d = to_div_clk(c);
	unsigned int div, min_div, max_div;
	long p_rate;

	rate = max(rate, 1UL);

	if (!d->ops || !d->ops->set_div) {
		min_div = max_div = d->div;
	} else {
		/* Clamp to at least 1 to avoid dividing by zero below. */
		min_div = max(d->min_div, 1U);
		max_div = d->max_div;
	}

	p_rate = clk_get_rate(c->parent);
	div = p_rate / rate;
	div = max(div, min_div);
	div = min(div, max_div);
	if (best_div)
		*best_div = div;

	return p_rate / div;
}

static long slave_div_round_rate(struct clk *c, unsigned long rate)
{
	return __slave_div_round_rate(c, rate, NULL);
}

static int slave_div_set_rate(struct clk *c, unsigned long rate)
{
	struct div_clk *d = to_div_clk(c);
	int div, rc = 0;
	long rrate;

	rrate = __slave_div_round_rate(c, rate, &div);
	if (rrate != rate)
		return -EINVAL;

	if (div == d->div)
		return 0;

	if (d->ops && d->ops->set_div)
		rc = d->ops->set_div(d, div);
	if (rc)
		return rc;

	d->div = div;

	return 0;
}

struct clk_ops clk_ops_slave_div = {
	.enable = div_enable,
	.disable = div_disable,
	.round_rate = slave_div_round_rate,
	.set_rate = slave_div_set_rate,
	.handoff = div_handoff,
};
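
/*
 * Illustrative usage only (not part of the original file): a divider
 * that can run between /1 and /4 behind a rate-settable parent would use
 * clk_ops_div; the same divider behind a fixed-rate parent would use
 * clk_ops_slave_div instead. The parent (pll0) and the register-banging
 * div_ops implementation (my_div_ops) are hypothetical; field names
 * follow their usage above, and any lock/list initialization boilerplate
 * the framework requires is omitted.
 *
 *	static struct div_clk my_div = {
 *		.min_div = 1,
 *		.max_div = 4,
 *		.div = 1,
 *		.ops = &my_div_ops,
 *		.c = {
 *			.parent = &pll0.c,
 *			.dbg_name = "my_div_clk",
 *			.ops = &clk_ops_div,
 *		},
 *	};
 */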