/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <mach/msm_iomap.h>
#include <mach/clk-provider.h>
#include <mach/clk.h>
#include <mach/scm-io.h>

#include "clock-local.h"

#ifdef CONFIG_MSM_SECURE_IO
#undef readl_relaxed
#undef writel_relaxed
#define readl_relaxed secure_readl
#define writel_relaxed secure_writel
#endif

/*
 * When enabling/disabling a clock, check the halt bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define HALT_CHECK_MAX_LOOPS	200
/* For clocks without halt checking, wait this long after enables/disables. */
#define HALT_CHECK_DELAY_US	10

DEFINE_SPINLOCK(local_clock_reg_lock);
struct clk_freq_tbl rcg_dummy_freq = F_END;

/*
 * Common Set-Rate Functions
 */

/* For clocks with MND dividers. */
void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	uint32_t ns_reg_val, ctl_reg_val;

	/* Assert MND reset. */
	ns_reg_val = readl_relaxed(rcg->ns_reg);
	ns_reg_val |= BIT(7);
	writel_relaxed(ns_reg_val, rcg->ns_reg);

	/* Program M and D values. */
	writel_relaxed(nf->md_val, rcg->md_reg);

	/* If the clock has a separate CC register, program it. */
	if (rcg->ns_reg != rcg->b.ctl_reg) {
		ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
		ctl_reg_val &= ~(rcg->ctl_mask);
		ctl_reg_val |= nf->ctl_val;
		writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
	}

	/* Deassert MND reset. */
	ns_reg_val &= ~BIT(7);
	writel_relaxed(ns_reg_val, rcg->ns_reg);
}

void set_rate_nop(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	/*
	 * Nothing to do for fixed-rate or integer-divider clocks. Any settings
	 * in NS registers are applied in the enable path, since power can be
	 * saved by leaving an un-clocked or slowly-clocked source selected
	 * until the clock is enabled.
	 */
}

void set_rate_mnd_8(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	uint32_t ctl_reg_val;

	/* Assert MND reset. */
	ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
	ctl_reg_val |= BIT(8);
	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);

	/* Program M and D values. */
	writel_relaxed(nf->md_val, rcg->md_reg);

	/* Program MN counter Enable and Mode. */
	ctl_reg_val &= ~(rcg->ctl_mask);
	ctl_reg_val |= nf->ctl_val;
	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);

	/* Deassert MND reset. */
	ctl_reg_val &= ~BIT(8);
	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
}

void set_rate_mnd_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	struct bank_masks *banks = rcg->bank_info;
	const struct bank_mask_info *new_bank_masks;
	const struct bank_mask_info *old_bank_masks;
	uint32_t ns_reg_val, ctl_reg_val;
	uint32_t bank_sel;

	/*
	 * Determine active bank and program the other one. If the clock is
	 * off, program the active bank since bank switching won't work if
	 * both banks aren't running.
	 */
	ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
	bank_sel = !!(ctl_reg_val & banks->bank_sel_mask);
	/* If clock isn't running, don't switch banks. */
	bank_sel ^= (!rcg->enabled || rcg->current_freq->freq_hz == 0);
	if (bank_sel == 0) {
		new_bank_masks = &banks->bank1_mask;
		old_bank_masks = &banks->bank0_mask;
	} else {
		new_bank_masks = &banks->bank0_mask;
		old_bank_masks = &banks->bank1_mask;
	}

	ns_reg_val = readl_relaxed(rcg->ns_reg);

	/* Assert bank MND reset. */
	ns_reg_val |= new_bank_masks->rst_mask;
	writel_relaxed(ns_reg_val, rcg->ns_reg);

	/*
	 * Program NS only if the clock is enabled, since the NS will be set
	 * as part of the enable procedure and should remain with a low-power
	 * MUX input selected until then.
	 */
	if (rcg->enabled) {
		ns_reg_val &= ~(new_bank_masks->ns_mask);
		ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, rcg->ns_reg);
	}

	writel_relaxed(nf->md_val, new_bank_masks->md_reg);

	/* Enable counter only if clock is enabled. */
	if (rcg->enabled)
		ctl_reg_val |= new_bank_masks->mnd_en_mask;
	else
		ctl_reg_val &= ~(new_bank_masks->mnd_en_mask);

	ctl_reg_val &= ~(new_bank_masks->mode_mask);
	ctl_reg_val |= (nf->ctl_val & new_bank_masks->mode_mask);
	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);

	/* Deassert bank MND reset. */
	ns_reg_val &= ~(new_bank_masks->rst_mask);
	writel_relaxed(ns_reg_val, rcg->ns_reg);

	/*
	 * Switch to the new bank if clock is running. If it isn't, then
	 * no switch is necessary since we programmed the active bank.
	 */
	if (rcg->enabled && rcg->current_freq->freq_hz) {
		ctl_reg_val ^= banks->bank_sel_mask;
		writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
		/*
		 * Wait at least 6 cycles of slowest bank's clock
		 * for the glitch-free MUX to fully switch sources.
		 */
		mb();
		udelay(1);

		/* Disable old bank's MN counter. */
		ctl_reg_val &= ~(old_bank_masks->mnd_en_mask);
		writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);

		/* Program old bank to a low-power source and divider. */
		ns_reg_val &= ~(old_bank_masks->ns_mask);
		ns_reg_val |= (rcg->freq_tbl->ns_val & old_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, rcg->ns_reg);
	}

	/* Update the MND_EN and NS masks to match the current bank. */
	rcg->mnd_en_mask = new_bank_masks->mnd_en_mask;
	rcg->ns_mask = new_bank_masks->ns_mask;
}

void set_rate_div_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	struct bank_masks *banks = rcg->bank_info;
	const struct bank_mask_info *new_bank_masks;
	const struct bank_mask_info *old_bank_masks;
	uint32_t ns_reg_val, bank_sel;

	/*
	 * Determine active bank and program the other one. If the clock is
	 * off, program the active bank since bank switching won't work if
	 * both banks aren't running.
	 */
	ns_reg_val = readl_relaxed(rcg->ns_reg);
	bank_sel = !!(ns_reg_val & banks->bank_sel_mask);
	/* If clock isn't running, don't switch banks. */
	bank_sel ^= (!rcg->enabled || rcg->current_freq->freq_hz == 0);
	if (bank_sel == 0) {
		new_bank_masks = &banks->bank1_mask;
		old_bank_masks = &banks->bank0_mask;
	} else {
		new_bank_masks = &banks->bank0_mask;
		old_bank_masks = &banks->bank1_mask;
	}

	/*
	 * Program NS only if the clock is enabled, since the NS will be set
	 * as part of the enable procedure and should remain with a low-power
	 * MUX input selected until then.
	 */
	if (rcg->enabled) {
		ns_reg_val &= ~(new_bank_masks->ns_mask);
		ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, rcg->ns_reg);
	}

	/*
	 * Switch to the new bank if clock is running. If it isn't, then
	 * no switch is necessary since we programmed the active bank.
	 */
	if (rcg->enabled && rcg->current_freq->freq_hz) {
		ns_reg_val ^= banks->bank_sel_mask;
		writel_relaxed(ns_reg_val, rcg->ns_reg);
		/*
		 * Wait at least 6 cycles of slowest bank's clock
		 * for the glitch-free MUX to fully switch sources.
		 */
		mb();
		udelay(1);

		/* Program old bank to a low-power source and divider. */
		ns_reg_val &= ~(old_bank_masks->ns_mask);
		ns_reg_val |= (rcg->freq_tbl->ns_val & old_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, rcg->ns_reg);
	}

	/* Update the NS mask to match the current bank. */
	rcg->ns_mask = new_bank_masks->ns_mask;
}

/*
 * Clock enable/disable functions
 */

/* Return non-zero if a clock status register shows the clock is halted. */
static int branch_clk_is_halted(const struct branch *b)
{
	int invert = (b->halt_check == ENABLE);
	int status_bit = readl_relaxed(b->halt_reg) & BIT(b->halt_bit);
	return invert ? !status_bit : status_bit;
}

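/* Return non-zero if the branch is in hardware clock gating mode. */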
static int branch_in_hwcg_mode(const struct branch *b)
{
	if (!b->hwcg_mask)
		return 0;

	return !!(readl_relaxed(b->hwcg_reg) & b->hwcg_mask);
}

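/*
 * Perform any register operations required to enable the branch, then
 * wait for the clock to turn on (unless it is hardware-gated).
 */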
void __branch_enable_reg(const struct branch *b, const char *name)
{
	u32 reg_val;

	if (b->en_mask) {
		reg_val = readl_relaxed(b->ctl_reg);
		reg_val |= b->en_mask;
		writel_relaxed(reg_val, b->ctl_reg);
	}

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers. It's also needed in the udelay() case to ensure
	 * the delay starts after the branch enable.
	 */
	mb();

	/* Skip checking halt bit if the clock is in hardware gated mode */
	if (branch_in_hwcg_mode(b))
		return;

	/* Wait for clock to enable before returning. */
	if (b->halt_check == DELAY) {
		udelay(HALT_CHECK_DELAY_US);
	} else if (b->halt_check == ENABLE || b->halt_check == HALT
			|| b->halt_check == ENABLE_VOTED
			|| b->halt_check == HALT_VOTED) {
		int count;

		/* Wait up to HALT_CHECK_MAX_LOOPS for clock to enable. */
		for (count = HALT_CHECK_MAX_LOOPS; branch_clk_is_halted(b)
					&& count > 0; count--)
			udelay(1);
		WARN(count == 0, "%s status stuck at 'off'", name);
	}
}

/* Perform any register operations required to enable the clock. */
static void __rcg_clk_enable_reg(struct rcg_clk *rcg)
{
	u32 reg_val;
	void __iomem *const reg = rcg->b.ctl_reg;

	/*
	 * Program the NS register, if applicable. NS registers are not
	 * set in the set_rate path because power can be saved by deferring
	 * the selection of a clocked source until the clock is enabled.
	 */
	if (rcg->ns_mask) {
		reg_val = readl_relaxed(rcg->ns_reg);
		reg_val &= ~(rcg->ns_mask);
		reg_val |= (rcg->current_freq->ns_val & rcg->ns_mask);
		writel_relaxed(reg_val, rcg->ns_reg);
	}

	/* Enable MN counter, if applicable. */
	reg_val = readl_relaxed(reg);
	if (rcg->current_freq->md_val) {
		reg_val |= rcg->mnd_en_mask;
		writel_relaxed(reg_val, reg);
	}
	/* Enable root. */
	if (rcg->root_en_mask) {
		reg_val |= rcg->root_en_mask;
		writel_relaxed(reg_val, reg);
	}
	__branch_enable_reg(&rcg->b, rcg->c.dbg_name);
}

/* Perform any register operations required to disable the branch. */
u32 __branch_disable_reg(const struct branch *b, const char *name)
{
	u32 reg_val;

	reg_val = b->ctl_reg ? readl_relaxed(b->ctl_reg) : 0;
	if (b->ctl_reg && b->en_mask) {
		reg_val &= ~(b->en_mask);
		writel_relaxed(reg_val, b->ctl_reg);
	}

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers. It's also needed in the udelay() case to ensure
	 * the delay starts after the branch disable.
	 */
	mb();

	/* Skip checking halt bit if the clock is in hardware gated mode */
	if (branch_in_hwcg_mode(b))
		return reg_val;

	/* Wait for clock to disable before continuing. */
	if (b->halt_check == DELAY || b->halt_check == ENABLE_VOTED
			|| b->halt_check == HALT_VOTED) {
		udelay(HALT_CHECK_DELAY_US);
	} else if (b->halt_check == ENABLE || b->halt_check == HALT) {
		int count;

		/* Wait up to HALT_CHECK_MAX_LOOPS for clock to disable. */
		for (count = HALT_CHECK_MAX_LOOPS; !branch_clk_is_halted(b)
					&& count > 0; count--)
			udelay(1);
		WARN(count == 0, "%s status stuck at 'on'", name);
	}

	return reg_val;
}

/* Perform any register operations required to disable the generator. */
static void __rcg_clk_disable_reg(struct rcg_clk *rcg)
{
	void __iomem *const reg = rcg->b.ctl_reg;
	uint32_t reg_val;

	reg_val = __branch_disable_reg(&rcg->b, rcg->c.dbg_name);
	/* Disable root. */
	if (rcg->root_en_mask) {
		reg_val &= ~(rcg->root_en_mask);
		writel_relaxed(reg_val, reg);
	}
	/* Disable MN counter, if applicable. */
	if (rcg->current_freq->md_val) {
		reg_val &= ~(rcg->mnd_en_mask);
		writel_relaxed(reg_val, reg);
	}
	/*
	 * Program NS register to low-power value with an un-clocked or
	 * slowly-clocked source selected.
	 */
	if (rcg->ns_mask) {
		reg_val = readl_relaxed(rcg->ns_reg);
		reg_val &= ~(rcg->ns_mask);
		reg_val |= (rcg->freq_tbl->ns_val & rcg->ns_mask);
		writel_relaxed(reg_val, rcg->ns_reg);
	}
}

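/* Prepare a rate-settable clock; its rate must have been set first. */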
static int rcg_clk_prepare(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	WARN(rcg->current_freq == &rcg_dummy_freq,
		"Attempting to prepare %s before setting its rate. "
		"Set the rate first!\n", rcg->c.dbg_name);
	rcg->prepared = true;

	return 0;
}

/* Enable a rate-settable clock. */
static int rcg_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct rcg_clk *rcg = to_rcg_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__rcg_clk_enable_reg(rcg);
	rcg->enabled = true;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

/* Disable a rate-settable clock. */
static void rcg_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct rcg_clk *rcg = to_rcg_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__rcg_clk_disable_reg(rcg);
	rcg->enabled = false;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

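/* Unprepare a rate-settable clock. */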
static void rcg_clk_unprepare(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	rcg->prepared = false;
}

/*
 * Frequency-related functions
 */

/* Set a clock to an exact rate. */
static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk_freq_tbl *nf, *cf;
	struct clk *chld;
	int rc = 0;
	unsigned long flags;

	for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	cf = rcg->current_freq;

	/* Enable source clock dependency for the new frequency. */
	if (rcg->prepared) {
		rc = clk_prepare(nf->src_clk);
		if (rc)
			return rc;
	}

	spin_lock_irqsave(&c->lock, flags);
	if (rcg->enabled) {
		rc = clk_enable(nf->src_clk);
		if (rc) {
			spin_unlock_irqrestore(&c->lock, flags);
			clk_unprepare(nf->src_clk);
			return rc;
		}
	}

	spin_lock(&local_clock_reg_lock);

	/* Disable branch if clock isn't dual-banked with a glitch-free MUX. */
	if (!rcg->bank_info) {
		/* Disable all branches to prevent glitches. */
		list_for_each_entry(chld, &rcg->c.children, siblings) {
			struct branch_clk *x = to_branch_clk(chld);
			/*
			 * We don't need to grab the child's lock because
			 * we hold the local_clock_reg_lock and 'enabled' is
			 * only modified while that lock is held.
			 */
			if (x->enabled)
				__branch_disable_reg(&x->b, x->c.dbg_name);
		}
		if (rcg->enabled)
			__rcg_clk_disable_reg(rcg);
	}

	/* Perform clock-specific frequency switch operations. */
	BUG_ON(!rcg->set_rate);
	rcg->set_rate(rcg, nf);

	/*
	 * Current freq must be updated before __rcg_clk_enable_reg()
	 * is called to make sure the MNCNTR_EN bit is set correctly.
	 */
	rcg->current_freq = nf;
	c->parent = nf->src_clk;

	/* Enable any clocks that were disabled. */
	if (!rcg->bank_info) {
		if (rcg->enabled)
			__rcg_clk_enable_reg(rcg);
		/* Enable only branches that were ON before. */
		list_for_each_entry(chld, &rcg->c.children, siblings) {
			struct branch_clk *x = to_branch_clk(chld);
			if (x->enabled)
				__branch_enable_reg(&x->b, x->c.dbg_name);
		}
	}

	spin_unlock(&local_clock_reg_lock);

	/* Release source requirements of the old freq. */
	if (rcg->enabled)
		clk_disable(cf->src_clk);
	spin_unlock_irqrestore(&c->lock, flags);

	if (rcg->prepared)
		clk_unprepare(cf->src_clk);

	return rc;
}

/* Check if a clock is currently enabled. */
static int rcg_clk_is_enabled(struct clk *c)
{
	return to_rcg_clk(c)->enabled;
}

/*
 * Return a supported rate that's at least the specified rate or
 * the max supported rate if the specified rate is larger than the
 * max supported rate.
 */
static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk_freq_tbl *f;

	for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
		if (f->freq_hz >= rate)
			return f->freq_hz;

	f--;
	return f->freq_hz;
}

/* Return the nth supported frequency for a given clock. */
static int rcg_clk_list_rate(struct clk *c, unsigned n)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
		return -ENXIO;

	return (rcg->freq_tbl + n)->freq_hz;
}

/* Disable hw clock gating if not set at boot; report if the clock is on. */
enum handoff branch_handoff(struct branch *b, struct clk *c)
{
	if (!branch_in_hwcg_mode(b)) {
		b->hwcg_mask = 0;
		if (b->ctl_reg && readl_relaxed(b->ctl_reg) & b->en_mask)
			return HANDOFF_ENABLED_CLK;
	}
	return HANDOFF_DISABLED_CLK;
}

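/* Mark a branch clock as enabled if it was left on at boot. */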
static enum handoff branch_clk_handoff(struct clk *c)
{
	struct branch_clk *br = to_branch_clk(c);
	if (branch_handoff(&br->b, &br->c) == HANDOFF_ENABLED_CLK) {
		br->enabled = true;
		return HANDOFF_ENABLED_CLK;
	}

	return HANDOFF_DISABLED_CLK;
}

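/*
 * Determine the current source of an RCG by matching the NS/MD register
 * contents against the frequency table, caching the match for handoff.
 */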
static struct clk *rcg_clk_get_parent(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	uint32_t ctl_val, ns_val, md_val, ns_mask;
	struct clk_freq_tbl *freq;

	ctl_val = readl_relaxed(rcg->b.ctl_reg);

	if (rcg->bank_info) {
		const struct bank_masks *bank_masks = rcg->bank_info;
		const struct bank_mask_info *bank_info;
		if (!(ctl_val & bank_masks->bank_sel_mask))
			bank_info = &bank_masks->bank0_mask;
		else
			bank_info = &bank_masks->bank1_mask;

		ns_mask = bank_info->ns_mask;
		md_val = bank_info->md_reg ?
				readl_relaxed(bank_info->md_reg) : 0;
	} else {
		ns_mask = rcg->ns_mask;
		md_val = rcg->md_reg ? readl_relaxed(rcg->md_reg) : 0;
	}

	if (!ns_mask)
		return NULL;

	ns_val = readl_relaxed(rcg->ns_reg) & ns_mask;
	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
		if ((freq->ns_val & ns_mask) == ns_val &&
		    (!freq->md_val || freq->md_val == md_val))
			break;
	}

	if (freq->freq_hz == FREQ_END)
		return NULL;

	/* Cache the results for the handoff code. */
	rcg->current_freq = freq;

	return freq->src_clk;
}

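/* Adopt the rate and enable state of an RCG that was left on at boot. */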
static enum handoff rcg_clk_handoff(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	enum handoff ret;

	if (rcg->current_freq && rcg->current_freq->freq_hz != FREQ_END)
		c->rate = rcg->current_freq->freq_hz;

	ret = branch_handoff(&rcg->b, &rcg->c);
	if (ret == HANDOFF_DISABLED_CLK)
		return HANDOFF_DISABLED_CLK;

	rcg->prepared = true;
	rcg->enabled = true;
	return HANDOFF_ENABLED_CLK;
}

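/* No-op clock operations, used by clocks such as the ground clock below. */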
struct clk_ops clk_ops_empty;

struct fixed_clk gnd_clk = {
	.c = {
		.dbg_name = "ground_clk",
		.ops = &clk_ops_empty,
		CLK_INIT(gnd_clk.c),
	},
};

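/* Enable a branch clock. */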
static int branch_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct branch_clk *br = to_branch_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__branch_enable_reg(&br->b, br->c.dbg_name);
	br->enabled = true;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

static void branch_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct branch_clk *br = to_branch_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__branch_disable_reg(&br->b, br->c.dbg_name);
	br->enabled = false;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static int branch_clk_is_enabled(struct clk *c)
{
	return to_branch_clk(c)->enabled;
}

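/* Turn hardware clock gating on or off for a branch. */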
static void branch_enable_hwcg(struct branch *b)
{
	unsigned long flags;
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(b->hwcg_reg);
	reg_val |= b->hwcg_mask;
	writel_relaxed(reg_val, b->hwcg_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static void branch_disable_hwcg(struct branch *b)
{
	unsigned long flags;
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(b->hwcg_reg);
	reg_val &= ~b->hwcg_mask;
	writel_relaxed(reg_val, b->hwcg_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static void branch_clk_enable_hwcg(struct clk *c)
{
	branch_enable_hwcg(&to_branch_clk(c)->b);
}

static void branch_clk_disable_hwcg(struct clk *c)
{
	branch_disable_hwcg(&to_branch_clk(c)->b);
}

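/* Set or clear a branch's memory retention bits for CLKFLAG_(NO)RETAIN. */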
static int branch_set_flags(struct branch *b, unsigned flags)
{
	unsigned long irq_flags;
	u32 reg_val;
	int ret = 0;

	if (!b->retain_reg)
		return -EPERM;

	spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
	reg_val = readl_relaxed(b->retain_reg);
	switch (flags) {
	case CLKFLAG_RETAIN:
		reg_val |= b->retain_mask;
		break;
	case CLKFLAG_NORETAIN:
		reg_val &= ~b->retain_mask;
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(reg_val, b->retain_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, irq_flags);

	return ret;
}

static int branch_clk_set_flags(struct clk *clk, unsigned flags)
{
	return branch_set_flags(&to_branch_clk(clk)->b, flags);
}

static int branch_clk_in_hwcg_mode(struct clk *c)
{
	return branch_in_hwcg_mode(&to_branch_clk(c)->b);
}

static void rcg_clk_enable_hwcg(struct clk *c)
{
	branch_enable_hwcg(&to_rcg_clk(c)->b);
}

static void rcg_clk_disable_hwcg(struct clk *c)
{
	branch_disable_hwcg(&to_rcg_clk(c)->b);
}

static int rcg_clk_in_hwcg_mode(struct clk *c)
{
	return branch_in_hwcg_mode(&to_rcg_clk(c)->b);
}

static int rcg_clk_set_flags(struct clk *clk, unsigned flags)
{
	return branch_set_flags(&to_rcg_clk(clk)->b, flags);
}

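/* Assert or deassert a branch's reset, toggling hw clock gating around it. */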
int branch_reset(struct branch *b, enum clk_reset_action action)
{
	int ret = 0;
	u32 reg_val;
	unsigned long flags;

	if (!b->reset_reg)
		return -EPERM;

	/* Disable hw gating when asserting a reset */
	if (b->hwcg_mask && action == CLK_RESET_ASSERT)
		branch_disable_hwcg(b);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	/* Assert/Deassert reset */
	reg_val = readl_relaxed(b->reset_reg);
	switch (action) {
	case CLK_RESET_ASSERT:
		reg_val |= b->reset_mask;
		break;
	case CLK_RESET_DEASSERT:
		reg_val &= ~b->reset_mask;
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(reg_val, b->reset_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Enable hw gating when deasserting a reset */
	if (b->hwcg_mask && action == CLK_RESET_DEASSERT)
		branch_enable_hwcg(b);
	/* Make sure write is issued before returning. */
	mb();
	return ret;
}

static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
{
	return branch_reset(&to_branch_clk(c)->b, action);
}

struct clk_ops clk_ops_branch = {
	.enable = branch_clk_enable,
	.disable = branch_clk_disable,
	.enable_hwcg = branch_clk_enable_hwcg,
	.disable_hwcg = branch_clk_disable_hwcg,
	.in_hwcg_mode = branch_clk_in_hwcg_mode,
	.is_enabled = branch_clk_is_enabled,
	.reset = branch_clk_reset,
	.handoff = branch_clk_handoff,
	.set_flags = branch_clk_set_flags,
};

struct clk_ops clk_ops_smi_2x = {
	.prepare = branch_clk_enable,
	.unprepare = branch_clk_disable,
	.is_enabled = branch_clk_is_enabled,
	.handoff = branch_clk_handoff,
};

struct clk_ops clk_ops_reset = {
	.reset = branch_clk_reset,
};

static int rcg_clk_reset(struct clk *c, enum clk_reset_action action)
{
	return branch_reset(&to_rcg_clk(c)->b, action);
}

struct clk_ops clk_ops_rcg = {
	.prepare = rcg_clk_prepare,
	.enable = rcg_clk_enable,
	.disable = rcg_clk_disable,
	.unprepare = rcg_clk_unprepare,
	.enable_hwcg = rcg_clk_enable_hwcg,
	.disable_hwcg = rcg_clk_disable_hwcg,
	.in_hwcg_mode = rcg_clk_in_hwcg_mode,
	.handoff = rcg_clk_handoff,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.is_enabled = rcg_clk_is_enabled,
	.round_rate = rcg_clk_round_rate,
	.reset = rcg_clk_reset,
	.set_flags = rcg_clk_set_flags,
	.get_parent = rcg_clk_get_parent,
};

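/* Enable the branch of a clock-divider (CDIV) clock. */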
static int cdiv_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct cdiv_clk *cdiv = to_cdiv_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__branch_enable_reg(&cdiv->b, cdiv->c.dbg_name);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

static void cdiv_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct cdiv_clk *cdiv = to_cdiv_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__branch_disable_reg(&cdiv->b, cdiv->c.dbg_name);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

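/*
 * For CDIV clocks the requested 'rate' is the divider value itself;
 * a rate of zero selects the external input instead of a divider.
 */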
static int cdiv_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct cdiv_clk *cdiv = to_cdiv_clk(c);
	u32 reg_val;

	if (rate > cdiv->max_div)
		return -EINVAL;

	spin_lock(&local_clock_reg_lock);
	reg_val = readl_relaxed(cdiv->ns_reg);
	reg_val &= ~(cdiv->ext_mask | (cdiv->max_div - 1) << cdiv->div_offset);
	/* Non-zero rates mean set a divider, zero means use external input */
	if (rate)
		reg_val |= (rate - 1) << cdiv->div_offset;
	else
		reg_val |= cdiv->ext_mask;
	writel_relaxed(reg_val, cdiv->ns_reg);
	spin_unlock(&local_clock_reg_lock);

	cdiv->cur_div = rate;
	return 0;
}

static unsigned long cdiv_clk_get_rate(struct clk *c)
{
	return to_cdiv_clk(c)->cur_div;
}

static long cdiv_clk_round_rate(struct clk *c, unsigned long rate)
{
	return rate > to_cdiv_clk(c)->max_div ? -EPERM : rate;
}

static int cdiv_clk_list_rate(struct clk *c, unsigned n)
{
	return n > to_cdiv_clk(c)->max_div ? -ENXIO : n;
}

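/* Read back the divider setting that was left in hardware at boot. */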
static enum handoff cdiv_clk_handoff(struct clk *c)
{
	struct cdiv_clk *cdiv = to_cdiv_clk(c);
	enum handoff ret;
	u32 reg_val;

	ret = branch_handoff(&cdiv->b, &cdiv->c);
	if (ret == HANDOFF_DISABLED_CLK)
		return ret;

	reg_val = readl_relaxed(cdiv->ns_reg);
	if (reg_val & cdiv->ext_mask) {
		cdiv->cur_div = 0;
	} else {
		reg_val >>= cdiv->div_offset;
		cdiv->cur_div = (reg_val & (cdiv->max_div - 1)) + 1;
	}
	c->rate = cdiv->cur_div;

	return HANDOFF_ENABLED_CLK;
}

static void cdiv_clk_enable_hwcg(struct clk *c)
{
	branch_enable_hwcg(&to_cdiv_clk(c)->b);
}

static void cdiv_clk_disable_hwcg(struct clk *c)
{
	branch_disable_hwcg(&to_cdiv_clk(c)->b);
}

static int cdiv_clk_in_hwcg_mode(struct clk *c)
{
	return branch_in_hwcg_mode(&to_cdiv_clk(c)->b);
}

struct clk_ops clk_ops_cdiv = {
	.enable = cdiv_clk_enable,
	.disable = cdiv_clk_disable,
	.in_hwcg_mode = cdiv_clk_in_hwcg_mode,
	.enable_hwcg = cdiv_clk_enable_hwcg,
	.disable_hwcg = cdiv_clk_disable_hwcg,
	.handoff = cdiv_clk_handoff,
	.set_rate = cdiv_clk_set_rate,
	.get_rate = cdiv_clk_get_rate,
	.list_rate = cdiv_clk_list_rate,
	.round_rate = cdiv_clk_round_rate,
};