/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <mach/clk.h>
#include <mach/clk-provider.h>

#include "clock-local2.h"

/*
 * When enabling/disabling a clock, check the halt bit up to this number of
 * times (with a 1 us delay in between) before continuing.
 */
#define HALT_CHECK_MAX_LOOPS	200
/* For clocks without halt checking, wait this long after enables/disables. */
#define HALT_CHECK_DELAY_US	10

/*
 * When updating an RCG configuration, check the update bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define UPDATE_CHECK_MAX_LOOPS	200

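/*
 * local_clock_reg_lock serializes all read-modify-write sequences on the
 * clock controller registers below. rcg_dummy_freq stands in for an RCG's
 * current_freq until a real rate has been set (see rcg_clk_prepare()).
 */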
DEFINE_SPINLOCK(local_clock_reg_lock);
struct clk_freq_tbl rcg_dummy_freq = F_END;

#define CMD_RCGR_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg)
#define CFG_RCGR_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x4)
#define M_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x8)
#define N_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0xC)
#define D_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x10)
#define CBCR_REG(x)		(*(x)->base + (x)->cbcr_reg)
#define BCR_REG(x)		(*(x)->base + (x)->bcr_reg)
#define VOTE_REG(x)		(*(x)->base + (x)->vote_reg)

/*
 * Important clock bit positions and masks
 */
#define CMD_RCGR_ROOT_ENABLE_BIT	BIT(1)
#define CBCR_BRANCH_ENABLE_BIT		BIT(0)
#define CBCR_BRANCH_OFF_BIT		BIT(31)
#define CMD_RCGR_CONFIG_UPDATE_BIT	BIT(0)
#define CMD_RCGR_ROOT_STATUS_BIT	BIT(31)
#define BCR_BLK_ARES_BIT		BIT(0)
#define CBCR_HW_CTL_BIT			BIT(1)
#define CFG_RCGR_DIV_MASK		BM(4, 0)
#define CFG_RCGR_SRC_SEL_MASK		BM(10, 8)
#define MND_MODE_MASK			BM(13, 12)
#define MND_DUAL_EDGE_MODE_BVAL		BVAL(13, 12, 0x2)
#define CMD_RCGR_CONFIG_DIRTY_MASK	BM(7, 4)
#define CBCR_BRANCH_CDIV_MASK		BM(24, 16)
#define CBCR_BRANCH_CDIV_MASKED(val)	BVAL(24, 16, (val))

enum branch_state {
	BRANCH_ON,
	BRANCH_OFF,
};

/*
 * RCG functions
 */

/*
 * Update an RCG with a new configuration. This may include a new M, N, or D
 * value, source selection or pre-divider value.
 */
static void rcg_update_config(struct rcg_clk *rcg)
{
	u32 cmd_rcgr_regval, count;

	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	cmd_rcgr_regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));

	/* Wait for update to take effect */
	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
		if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
				CMD_RCGR_CONFIG_UPDATE_BIT))
			return;
		udelay(1);
	}

	WARN(count == 0, "%s: rcg didn't update its configuration.",
		rcg->c.dbg_name);
}

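/*
 * The two set_rate handlers below take a pre-computed frequency table entry:
 * div_src_val carries the CFG register's source select and divider bits, and
 * m_val/n_val/d_val carry the MND counter programming for clocks that have
 * one. Rate lookup itself happens in rcg_clk_set_rate().
 */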
/* RCG set rate function for clocks with Half Integer Dividers. */
void set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	u32 cfg_regval;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
	cfg_regval |= nf->div_src_val;
	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));

	rcg_update_config(rcg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

/* RCG set rate function for clocks with MND & Half Integer Dividers. */
void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	u32 cfg_regval;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	writel_relaxed(nf->m_val, M_REG(rcg));
	writel_relaxed(nf->n_val, N_REG(rcg));
	writel_relaxed(nf->d_val, D_REG(rcg));

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
	cfg_regval |= nf->div_src_val;

	/* Activate or disable the M/N:D divider as necessary */
	cfg_regval &= ~MND_MODE_MASK;
	if (nf->n_val != 0)
		cfg_regval |= MND_DUAL_EDGE_MODE_BVAL;
	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));

	rcg_update_config(rcg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static int rcg_clk_prepare(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	WARN(rcg->current_freq == &rcg_dummy_freq,
		"Attempting to prepare %s before setting its rate. "
		"Set the rate first!\n", rcg->c.dbg_name);

	return 0;
}

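/*
 * Switch an RCG to a new rate. References are taken on the new frequency's
 * source clock first (clk_prepare, plus clk_enable if this clock is on) to
 * mirror this clock's own state, and the matching references on the old
 * source are dropped only after the switch, so the old source keeps running
 * throughout the transition.
 */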
static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct clk_freq_tbl *cf, *nf;
	struct rcg_clk *rcg = to_rcg_clk(c);
	int rc;
	unsigned long flags;

	for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	cf = rcg->current_freq;

	/* Enable source clock dependency for the new freq. */
	if (c->prepare_count) {
		rc = clk_prepare(nf->src_clk);
		if (rc)
			return rc;
	}

	spin_lock_irqsave(&c->lock, flags);
	if (c->count) {
		rc = clk_enable(nf->src_clk);
		if (rc) {
			spin_unlock_irqrestore(&c->lock, flags);
			clk_unprepare(nf->src_clk);
			return rc;
		}
	}

	BUG_ON(!rcg->set_rate);

	/* Perform clock-specific frequency switch operations. */
	rcg->set_rate(rcg, nf);

	/* Release source requirements of the old freq. */
	if (c->count)
		clk_disable(cf->src_clk);
	spin_unlock_irqrestore(&c->lock, flags);

	if (c->prepare_count)
		clk_unprepare(cf->src_clk);

	rcg->current_freq = nf;
	c->parent = nf->src_clk;

	return 0;
}

/* Return a supported rate that's at least the specified rate. */
static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk_freq_tbl *f;

	for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
		if (f->freq_hz >= rate)
			return f->freq_hz;

	return -EPERM;
}

/* Return the nth supported frequency for a given clock. */
static int rcg_clk_list_rate(struct clk *c, unsigned n)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
		return -ENXIO;

	return (rcg->freq_tbl + n)->freq_hz;
}

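/*
 * Handoff support: reconstruct the rate a bootloader-configured RCG is
 * running at by reading back the source select, divider and (optionally)
 * M/N/D register values and matching them against the frequency table.
 */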
static enum handoff _rcg_clk_handoff(struct rcg_clk *rcg, int has_mnd)
{
	u32 n_regval = 0, m_regval = 0, d_regval = 0;
	u32 cfg_regval;
	struct clk_freq_tbl *freq;
	u32 cmd_rcgr_regval;

	/* Is the root enabled? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if ((cmd_rcgr_regval & CMD_RCGR_ROOT_STATUS_BIT))
		return HANDOFF_DISABLED_CLK;

	/* Is there a pending configuration? */
	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
		return HANDOFF_UNKNOWN_RATE;

	/* Get values of m, n, d, div and src_sel registers. */
	if (has_mnd) {
		m_regval = readl_relaxed(M_REG(rcg));
		n_regval = readl_relaxed(N_REG(rcg));
		d_regval = readl_relaxed(D_REG(rcg));

		/*
		 * The n and d values stored in the frequency tables are sign
		 * extended to 32 bits. The n and d values in the registers are
		 * sign extended to 8 or 16 bits. Sign extend the values read
		 * from the registers so that they can be compared to the
		 * values in the frequency tables.
		 */
		n_regval |= (n_regval >> 8) ? BM(31, 16) : BM(31, 8);
		d_regval |= (d_regval >> 8) ? BM(31, 16) : BM(31, 8);
	}

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= CFG_RCGR_SRC_SEL_MASK | CFG_RCGR_DIV_MASK
				| MND_MODE_MASK;

	/* If mnd counter is present, check if it's in use. */
	has_mnd = (has_mnd) &&
		((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL);

	/*
	 * Clear out the mn counter mode bits since we now want to compare only
	 * the source mux selection and pre-divider values in the registers.
	 */
	cfg_regval &= ~MND_MODE_MASK;

	/* Figure out what rate the rcg is running at */
	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
		if (freq->div_src_val != cfg_regval)
			continue;
		if (has_mnd) {
			if (freq->m_val != m_regval)
				continue;
			if (freq->n_val != n_regval)
				continue;
			if (freq->d_val != d_regval)
				continue;
		}
		break;
	}

	/* No known frequency found */
	if (freq->freq_hz == FREQ_END)
		return HANDOFF_UNKNOWN_RATE;

	rcg->current_freq = freq;
	rcg->c.parent = freq->src_clk;
	rcg->c.rate = freq->freq_hz;

	return HANDOFF_ENABLED_CLK;
}

static enum handoff rcg_mnd_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c), 1);
}

static enum handoff rcg_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c), 0);
}

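/*
 * CBCR bits [31:28] report the branch state: 0x0 means the branch is on,
 * 0x8 means it is off, and 0x2 indicates an on branch managed by a NoC FSM.
 */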
#define BRANCH_CHECK_MASK	BM(31, 28)
#define BRANCH_ON_VAL		BVAL(31, 28, 0x0)
#define BRANCH_OFF_VAL		BVAL(31, 28, 0x8)
#define BRANCH_NOC_FSM_ON_VAL	BVAL(31, 28, 0x2)

/*
 * Branch clock functions
 */
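/*
 * Poll (or, for clocks that cannot be polled, simply delay) until a branch
 * reaches the expected on/off state, warning if it never gets there.
 */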
static void branch_clk_halt_check(u32 halt_check, const char *clk_name,
				void __iomem *cbcr_reg,
				enum branch_state br_status)
{
	char *status_str = (br_status == BRANCH_ON) ? "off" : "on";

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers. It's also needed in the udelay() case to ensure
	 * the delay starts after the branch disable.
	 */
	mb();

	if (halt_check == DELAY || halt_check == HALT_VOTED) {
		udelay(HALT_CHECK_DELAY_US);
	} else if (halt_check == HALT) {
		int count;
		u32 val;
		for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
			val = readl_relaxed(cbcr_reg);
			val &= BRANCH_CHECK_MASK;
			switch (br_status) {
			case BRANCH_ON:
				if (val == BRANCH_ON_VAL
						|| val == BRANCH_NOC_FSM_ON_VAL)
					return;
				break;

			case BRANCH_OFF:
				if (val == BRANCH_OFF_VAL)
					return;
				break;
			}
			udelay(1);
		}
		WARN(count == 0, "%s status stuck %s", clk_name, status_str);
	}
}

static int branch_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 cbcr_val;
	struct branch_clk *branch = to_branch_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cbcr_val = readl_relaxed(CBCR_REG(branch));
	cbcr_val |= CBCR_BRANCH_ENABLE_BIT;
	writel_relaxed(cbcr_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to enable before continuing. */
	branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
				CBCR_REG(branch), BRANCH_ON);

	return 0;
}

static void branch_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct branch_clk *branch = to_branch_clk(c);
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(CBCR_REG(branch));
	reg_val &= ~CBCR_BRANCH_ENABLE_BIT;
	writel_relaxed(reg_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to disable before continuing. */
	branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
				CBCR_REG(branch), BRANCH_OFF);
}

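/*
 * For branches with a local core divider (max_div != 0), the "rate" passed
 * in is interpreted as a divider value rather than a frequency in Hz.
 */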
static int branch_cdiv_set_rate(struct branch_clk *branch, unsigned long rate)
{
	unsigned long flags;
	u32 regval;

	if (rate > branch->max_div)
		return -EINVAL;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	regval = readl_relaxed(CBCR_REG(branch));
	regval &= ~CBCR_BRANCH_CDIV_MASK;
	regval |= CBCR_BRANCH_CDIV_MASKED(rate);
	writel_relaxed(regval, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

static int branch_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return branch_cdiv_set_rate(branch, rate);

	if (!branch->has_sibling)
		return clk_set_rate(c->parent, rate);

	return -EPERM;
}

static long branch_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return rate <= (branch->max_div) ? rate : -EPERM;

	if (!branch->has_sibling)
		return clk_round_rate(c->parent, rate);

	return -EPERM;
}

static unsigned long branch_clk_get_rate(struct clk *c)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return branch->c.rate;

	if (!branch->has_sibling)
		return clk_get_rate(c->parent);

	return 0;
}

static int branch_clk_list_rate(struct clk *c, unsigned n)
{
	int level, fmax = 0, rate;
	struct branch_clk *branch = to_branch_clk(c);
	struct clk *parent = c->parent;

	if (branch->has_sibling == 1)
		return -ENXIO;

	if (!parent || !parent->ops->list_rate)
		return -ENXIO;

	/* Find max frequency supported within voltage constraints. */
	if (!parent->vdd_class) {
		fmax = INT_MAX;
	} else {
		for (level = 0; level < parent->num_fmax; level++)
			if (parent->fmax[level])
				fmax = parent->fmax[level];
	}

	rate = parent->ops->list_rate(parent, n);
	if (rate <= fmax)
		return rate;
	else
		return -ENXIO;
}

static enum handoff branch_clk_handoff(struct clk *c)
{
	struct branch_clk *branch = to_branch_clk(c);
	u32 cbcr_regval;

	cbcr_regval = readl_relaxed(CBCR_REG(branch));
	if ((cbcr_regval & CBCR_BRANCH_OFF_BIT))
		return HANDOFF_DISABLED_CLK;

	if (c->parent && c->parent->ops->handoff)
		return c->parent->ops->handoff(c->parent);

	return HANDOFF_ENABLED_CLK;
}

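/*
 * Assert or deassert the asynchronous reset (ARES) bit in a block's BCR.
 * Any delay required between assert and deassert is the caller's
 * responsibility.
 */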
static int __branch_clk_reset(void __iomem *bcr_reg,
				enum clk_reset_action action)
{
	int ret = 0;
	unsigned long flags;
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(bcr_reg);
	switch (action) {
	case CLK_RESET_ASSERT:
		reg_val |= BCR_BLK_ARES_BIT;
		break;
	case CLK_RESET_DEASSERT:
		reg_val &= ~BCR_BLK_ARES_BIT;
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(reg_val, bcr_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Make sure write is issued before returning. */
	mb();

	return ret;
}

static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (!branch->bcr_reg) {
		WARN(1, "clk_reset called on an unsupported clock (%s)\n",
			c->dbg_name);
		return -EPERM;
	}
	return __branch_clk_reset(BCR_REG(branch), action);
}

/*
 * Voteable clock functions
 */
static int local_vote_clk_reset(struct clk *c, enum clk_reset_action action)
{
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	if (!vclk->bcr_reg) {
		WARN(1, "clk_reset called on an unsupported clock (%s)\n",
			c->dbg_name);
		return -EPERM;
	}
	return __branch_clk_reset(BCR_REG(vclk), action);
}

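/*
 * Voteable branches are shared with other masters: enabling sets this
 * master's vote bit and then waits for the branch to turn on, while
 * disabling only clears the vote, since other voters may legitimately
 * keep the clock running.
 */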
static int local_vote_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 ena;
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(VOTE_REG(vclk));
	ena |= vclk->en_mask;
	writel_relaxed(ena, VOTE_REG(vclk));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	branch_clk_halt_check(vclk->halt_check, c->dbg_name, CBCR_REG(vclk),
				BRANCH_ON);

	return 0;
}

static void local_vote_clk_disable(struct clk *c)
{
	unsigned long flags;
	u32 ena;
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(VOTE_REG(vclk));
	ena &= ~vclk->en_mask;
	writel_relaxed(ena, VOTE_REG(vclk));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static enum handoff local_vote_clk_handoff(struct clk *c)
{
	struct local_vote_clk *vclk = to_local_vote_clk(c);
	u32 vote_regval;

	/* Is the branch voted on by apps? */
	vote_regval = readl_relaxed(VOTE_REG(vclk));
	if (!(vote_regval & vclk->en_mask))
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

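/* Empty ops, for clocks that require no hardware control from this driver. */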
struct clk_ops clk_ops_empty;

struct clk_ops clk_ops_rcg = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_clk_handoff,
};

struct clk_ops clk_ops_rcg_mnd = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_mnd_clk_handoff,
};

struct clk_ops clk_ops_branch = {
	.enable = branch_clk_enable,
	.disable = branch_clk_disable,
	.set_rate = branch_clk_set_rate,
	.get_rate = branch_clk_get_rate,
	.list_rate = branch_clk_list_rate,
	.round_rate = branch_clk_round_rate,
	.reset = branch_clk_reset,
	.handoff = branch_clk_handoff,
};

struct clk_ops clk_ops_vote = {
	.enable = local_vote_clk_enable,
	.disable = local_vote_clk_disable,
	.reset = local_vote_clk_reset,
	.handoff = local_vote_clk_handoff,
};
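
/*
 * A minimal consumer-side sketch of how clocks backed by these ops are
 * driven through the standard clk API. The device and clock name are
 * hypothetical and error handling is elided; note that rcg_clk_prepare()
 * above warns if a clock is prepared before its rate was ever set, hence
 * clk_set_rate() comes first.
 *
 *	struct clk *clk = clk_get(dev, "core_clk");
 *
 *	clk_set_rate(clk, clk_round_rate(clk, 19200000));
 *	clk_prepare_enable(clk);
 *	...
 *	clk_disable_unprepare(clk);
 *	clk_put(clk);
 */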