/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <mach/clk.h>
#include <mach/clk-provider.h>

#include "clock-local2.h"

/*
 * When enabling/disabling a clock, check the halt bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define HALT_CHECK_MAX_LOOPS	200
/* For clocks without halt checking, wait this long after enables/disables. */
#define HALT_CHECK_DELAY_US	10

/*
 * When updating an RCG configuration, check the update bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define UPDATE_CHECK_MAX_LOOPS	200

DEFINE_SPINLOCK(local_clock_reg_lock);
struct clk_freq_tbl rcg_dummy_freq = F_END;

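/*
 * Each RCG's CFG, M, N and D registers sit at fixed offsets from its
 * CMD_RCGR register, so a clock only needs to record cmd_rcgr_reg; the
 * macros below derive the remaining MMIO addresses from it.
 */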
#define CMD_RCGR_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg)
#define CFG_RCGR_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x4)
#define M_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x8)
#define N_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0xC)
#define D_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x10)
#define CBCR_REG(x)		(*(x)->base + (x)->cbcr_reg)
#define BCR_REG(x)		(*(x)->base + (x)->bcr_reg)
#define VOTE_REG(x)		(*(x)->base + (x)->vote_reg)

/*
 * Important clock bit positions and masks
 */
#define CMD_RCGR_ROOT_ENABLE_BIT	BIT(1)
#define CBCR_BRANCH_ENABLE_BIT		BIT(0)
#define CBCR_BRANCH_OFF_BIT		BIT(31)
#define CMD_RCGR_CONFIG_UPDATE_BIT	BIT(0)
#define CMD_RCGR_ROOT_STATUS_BIT	BIT(31)
#define BCR_BLK_ARES_BIT		BIT(0)
#define CBCR_HW_CTL_BIT			BIT(1)
#define CFG_RCGR_DIV_MASK		BM(4, 0)
#define CFG_RCGR_SRC_SEL_MASK		BM(10, 8)
#define MND_MODE_MASK			BM(13, 12)
#define MND_DUAL_EDGE_MODE_BVAL		BVAL(13, 12, 0x2)
#define CMD_RCGR_CONFIG_DIRTY_MASK	BM(7, 4)
#define CBCR_CDIV_LSB			16
#define CBCR_CDIV_MSB			24

enum branch_state {
	BRANCH_ON,
	BRANCH_OFF,
};

/*
 * RCG functions
 */

/*
 * Update an RCG with a new configuration. This may include a new M, N, or D
 * value, source selection or pre-divider value.
 */
static void rcg_update_config(struct rcg_clk *rcg)
{
	u32 cmd_rcgr_regval, count;

	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	cmd_rcgr_regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));

	/* Wait for update to take effect */
	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
		if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
				CMD_RCGR_CONFIG_UPDATE_BIT))
			return;
		udelay(1);
	}

	WARN(count == 0, "%s: rcg didn't update its configuration.",
		rcg->c.dbg_name);
}

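/*
 * Note: the CFG register's divider field encodes half-integer dividers;
 * on RCGs of this type the field value is typically (2 * div) - 1, e.g.
 * a divide-by-1.5 is programmed as 0x2. The frequency tables arrive with
 * this encoding pre-packed into div_src_val, so set_rate_hid() below only
 * masks and writes it.
 */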
/* RCG set rate function for clocks with Half Integer Dividers. */
void set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	u32 cfg_regval;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
	cfg_regval |= nf->div_src_val;
	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));

	rcg_update_config(rcg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

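/*
 * When the M/N:D counter is engaged, the output becomes
 * (src / pre-div) * (M / N). As with the pre-divider, the frequency
 * tables hold register-ready values; n_val, for instance, is typically
 * stored in the inverted ~(N - M) form the hardware expects rather than
 * as a raw N.
 */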
/* RCG set rate function for clocks with MND & Half Integer Dividers. */
void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	u32 cfg_regval;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	writel_relaxed(nf->m_val, M_REG(rcg));
	writel_relaxed(nf->n_val, N_REG(rcg));
	writel_relaxed(nf->d_val, D_REG(rcg));

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
	cfg_regval |= nf->div_src_val;

	/* Activate or disable the M/N:D divider as necessary */
	cfg_regval &= ~MND_MODE_MASK;
	if (nf->n_val != 0)
		cfg_regval |= MND_DUAL_EDGE_MODE_BVAL;
	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));

	rcg_update_config(rcg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static int rcg_clk_prepare(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	WARN(rcg->current_freq == &rcg_dummy_freq,
		"Attempting to prepare %s before setting its rate. "
		"Set the rate first!\n", rcg->c.dbg_name);

	return 0;
}

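/*
 * Typical client-side sequence against these ops (a sketch using the
 * standard clk API; the rate is illustrative):
 *
 *	clk_set_rate(clk, 19200000);	<- selects a freq_tbl entry
 *	clk_prepare_enable(clk);	<- rcg_clk_prepare() warns if no
 *					   rate was set beforehand
 */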
static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct clk_freq_tbl *cf, *nf;
	struct rcg_clk *rcg = to_rcg_clk(c);
	int rc;
	unsigned long flags;

	for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	cf = rcg->current_freq;

	/* Enable source clock dependency for the new freq. */
	if (c->prepare_count) {
		rc = clk_prepare(nf->src_clk);
		if (rc)
			return rc;
	}

	spin_lock_irqsave(&c->lock, flags);
	if (c->count) {
		rc = clk_enable(nf->src_clk);
		if (rc) {
			spin_unlock_irqrestore(&c->lock, flags);
			clk_unprepare(nf->src_clk);
			return rc;
		}
	}

	BUG_ON(!rcg->set_rate);

	/* Perform clock-specific frequency switch operations. */
	rcg->set_rate(rcg, nf);

	/* Release source requirements of the old freq. */
	if (c->count)
		clk_disable(cf->src_clk);
	spin_unlock_irqrestore(&c->lock, flags);

	if (c->prepare_count)
		clk_unprepare(cf->src_clk);

	rcg->current_freq = nf;
	c->parent = nf->src_clk;

	return 0;
}

/* Return a supported rate that's at least the specified rate. */
static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk_freq_tbl *f;

	for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
		if (f->freq_hz >= rate)
			return f->freq_hz;

	return -EPERM;
}

/* Return the nth supported frequency for a given clock. */
static int rcg_clk_list_rate(struct clk *c, unsigned n)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
		return -ENXIO;

	return (rcg->freq_tbl + n)->freq_hz;
}

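/*
 * Reverse-engineer the current RCG configuration: read back the source
 * select, pre-divider and (optionally) M/N:D values, then match them
 * against the frequency table to discover which parent and rate the
 * bootloader left programmed. Returns NULL if nothing matches.
 */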
static struct clk *_rcg_clk_get_parent(struct rcg_clk *rcg, int has_mnd)
{
	u32 n_regval = 0, m_regval = 0, d_regval = 0;
	u32 cfg_regval;
	struct clk_freq_tbl *freq;
	u32 cmd_rcgr_regval;

	/* Is there a pending configuration? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
		return NULL;

	/* Get values of m, n, d, div and src_sel registers. */
	if (has_mnd) {
		m_regval = readl_relaxed(M_REG(rcg));
		n_regval = readl_relaxed(N_REG(rcg));
		d_regval = readl_relaxed(D_REG(rcg));

		/*
		 * The n and d values stored in the frequency tables are sign
		 * extended to 32 bits. The n and d values in the registers are
		 * sign extended to 8 or 16 bits. Sign extend the values read
		 * from the registers so that they can be compared to the
		 * values in the frequency tables.
		 */
		n_regval |= (n_regval >> 8) ? BM(31, 16) : BM(31, 8);
		d_regval |= (d_regval >> 8) ? BM(31, 16) : BM(31, 8);
	}

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= CFG_RCGR_SRC_SEL_MASK | CFG_RCGR_DIV_MASK
				| MND_MODE_MASK;

	/* If mnd counter is present, check if it's in use. */
	has_mnd = (has_mnd) &&
		((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL);

	/*
	 * Clear out the mn counter mode bits since we now want to compare only
	 * the source mux selection and pre-divider values in the registers.
	 */
	cfg_regval &= ~MND_MODE_MASK;

	/* Figure out what rate the rcg is running at */
	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
		if (freq->div_src_val != cfg_regval)
			continue;
		if (has_mnd) {
			if (freq->m_val != m_regval)
				continue;
			if (freq->n_val != n_regval)
				continue;
			if (freq->d_val != d_regval)
				continue;
		}
		break;
	}

	/* No known frequency found */
	if (freq->freq_hz == FREQ_END)
		return NULL;

	rcg->current_freq = freq;
	return freq->src_clk;
}

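/*
 * Handoff runs once at boot so the clock framework's bookkeeping matches
 * whatever rate and enable state the bootloader left behind.
 */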
static enum handoff _rcg_clk_handoff(struct rcg_clk *rcg)
{
	u32 cmd_rcgr_regval;

	if (rcg->current_freq && rcg->current_freq->freq_hz != FREQ_END)
		rcg->c.rate = rcg->current_freq->freq_hz;

	/* Is the root enabled? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if ((cmd_rcgr_regval & CMD_RCGR_ROOT_STATUS_BIT))
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

static struct clk *rcg_mnd_clk_get_parent(struct clk *c)
{
	return _rcg_clk_get_parent(to_rcg_clk(c), 1);
}

static struct clk *rcg_clk_get_parent(struct clk *c)
{
	return _rcg_clk_get_parent(to_rcg_clk(c), 0);
}

static enum handoff rcg_mnd_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c));
}

static enum handoff rcg_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c));
}

#define BRANCH_CHECK_MASK	BM(31, 28)
#define BRANCH_ON_VAL		BVAL(31, 28, 0x0)
#define BRANCH_OFF_VAL		BVAL(31, 28, 0x8)
#define BRANCH_NOC_FSM_ON_VAL	BVAL(31, 28, 0x2)

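/*
 * CBCR bits [31:28] report the branch FSM state: 0x0 when the branch is
 * on, 0x8 when it is off, and 0x2 for an enabled branch managed by a NoC
 * FSM.
 */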
/*
 * Branch clock functions
 */
static void branch_clk_halt_check(u32 halt_check, const char *clk_name,
				void __iomem *cbcr_reg,
				enum branch_state br_status)
{
	char *status_str = (br_status == BRANCH_ON) ? "off" : "on";

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers. It's also needed in the udelay() case to ensure
	 * the delay starts after the branch disable.
	 */
	mb();

	if (halt_check == DELAY || halt_check == HALT_VOTED) {
		udelay(HALT_CHECK_DELAY_US);
	} else if (halt_check == HALT) {
		int count;
		u32 val;
		for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
			val = readl_relaxed(cbcr_reg);
			val &= BRANCH_CHECK_MASK;
			switch (br_status) {
			case BRANCH_ON:
				if (val == BRANCH_ON_VAL
					|| val == BRANCH_NOC_FSM_ON_VAL)
					return;
				break;

			case BRANCH_OFF:
				if (val == BRANCH_OFF_VAL)
					return;
				break;
			}
			udelay(1);
		}
		WARN(count == 0, "%s status stuck %s", clk_name, status_str);
	}
}

static int branch_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 cbcr_val;
	struct branch_clk *branch = to_branch_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cbcr_val = readl_relaxed(CBCR_REG(branch));
	cbcr_val |= CBCR_BRANCH_ENABLE_BIT;
	writel_relaxed(cbcr_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to enable before continuing. */
	branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
				CBCR_REG(branch), BRANCH_ON);

	return 0;
}

static void branch_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct branch_clk *branch = to_branch_clk(c);
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(CBCR_REG(branch));
	reg_val &= ~CBCR_BRANCH_ENABLE_BIT;
	writel_relaxed(reg_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to disable before continuing. */
	branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
				CBCR_REG(branch), BRANCH_OFF);
}

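/*
 * For branches with a local CDIV divider, the "rate" passed through the
 * framework is the divider value itself, not a frequency in Hz.
 */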
static int branch_cdiv_set_rate(struct branch_clk *branch, unsigned long rate)
{
	unsigned long flags;
	u32 regval;

	if (rate > branch->max_div)
		return -EINVAL;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	regval = readl_relaxed(CBCR_REG(branch));
	regval &= ~BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
	regval |= BVAL(CBCR_CDIV_MSB, CBCR_CDIV_LSB, rate);
	writel_relaxed(regval, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

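/*
 * A branch that has siblings shares its RCG with other branches, so it
 * must not retune the common source; only a lone branch may forward rate
 * requests to its parent.
 */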
static int branch_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return branch_cdiv_set_rate(branch, rate);

	if (!branch->has_sibling)
		return clk_set_rate(c->parent, rate);

	return -EPERM;
}

static long branch_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return rate <= (branch->max_div) ? rate : -EPERM;

	if (!branch->has_sibling)
		return clk_round_rate(c->parent, rate);

	return -EPERM;
}

static unsigned long branch_clk_get_rate(struct clk *c)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return branch->c.rate;

	if (!branch->has_sibling)
		return clk_get_rate(c->parent);

	return 0;
}

static int branch_clk_list_rate(struct clk *c, unsigned n)
{
	int level, fmax = 0, rate;
	struct branch_clk *branch = to_branch_clk(c);
	struct clk *parent = c->parent;

	if (branch->has_sibling == 1)
		return -ENXIO;

	if (!parent || !parent->ops->list_rate)
		return -ENXIO;

	/* Find max frequency supported within voltage constraints. */
	if (!parent->vdd_class) {
		fmax = INT_MAX;
	} else {
		for (level = 0; level < parent->num_fmax; level++)
			if (parent->fmax[level])
				fmax = parent->fmax[level];
	}

	rate = parent->ops->list_rate(parent, n);
	if (rate <= fmax)
		return rate;
	else
		return -ENXIO;
}

static enum handoff branch_clk_handoff(struct clk *c)
{
	struct branch_clk *branch = to_branch_clk(c);
	u32 cbcr_regval;

	cbcr_regval = readl_relaxed(CBCR_REG(branch));
	if ((cbcr_regval & CBCR_BRANCH_OFF_BIT))
		return HANDOFF_DISABLED_CLK;

	if (branch->max_div) {
		cbcr_regval &= BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
		cbcr_regval >>= CBCR_CDIV_LSB;
		c->rate = cbcr_regval;
	} else if (!branch->has_sibling) {
		c->rate = clk_get_rate(c->parent);
	}

	return HANDOFF_ENABLED_CLK;
}

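/*
 * Block resets are driven through a separate BCR register: asserting
 * BCR_BLK_ARES_BIT holds the entire block in reset until the bit is
 * cleared again by CLK_RESET_DEASSERT.
 */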
static int __branch_clk_reset(void __iomem *bcr_reg,
				enum clk_reset_action action)
{
	int ret = 0;
	unsigned long flags;
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(bcr_reg);
	switch (action) {
	case CLK_RESET_ASSERT:
		reg_val |= BCR_BLK_ARES_BIT;
		break;
	case CLK_RESET_DEASSERT:
		reg_val &= ~BCR_BLK_ARES_BIT;
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(reg_val, bcr_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Make sure write is issued before returning. */
	mb();

	return ret;
}

static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (!branch->bcr_reg) {
		WARN("clk_reset called on an unsupported clock (%s)\n",
			c->dbg_name);
		return -EPERM;
	}
	return __branch_clk_reset(BCR_REG(branch), action);
}

/*
 * Voteable clock functions
 */
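/*
 * Voteable clocks are shared with other masters. Rather than toggling the
 * branch enable bit directly, each master sets its own bit in a vote
 * register; the hardware keeps the clock running as long as any vote is
 * set, so our vote is only a request to keep the clock enabled.
 */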
571static int local_vote_clk_reset(struct clk *c, enum clk_reset_action action)
572{
Vikram Mulukutla27784c02012-06-06 13:37:36 -0700573 struct local_vote_clk *vclk = to_local_vote_clk(c);
Vikram Mulukutla21c6b912012-06-14 14:18:45 -0700574
575 if (!vclk->bcr_reg) {
576 WARN("clk_reset called on an unsupported clock (%s)\n",
577 c->dbg_name);
578 return -EPERM;
579 }
580 return __branch_clk_reset(BCR_REG(vclk), action);
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700581}
582
static int local_vote_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 ena;
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(VOTE_REG(vclk));
	ena |= vclk->en_mask;
	writel_relaxed(ena, VOTE_REG(vclk));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	branch_clk_halt_check(vclk->halt_check, c->dbg_name, CBCR_REG(vclk),
				BRANCH_ON);

	return 0;
}

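/*
 * Dropping the vote below is fire-and-forget: no halt check is done on
 * disable, since another master's vote may legitimately keep the clock
 * running.
 */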
static void local_vote_clk_disable(struct clk *c)
{
	unsigned long flags;
	u32 ena;
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(VOTE_REG(vclk));
	ena &= ~vclk->en_mask;
	writel_relaxed(ena, VOTE_REG(vclk));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static enum handoff local_vote_clk_handoff(struct clk *c)
{
	struct local_vote_clk *vclk = to_local_vote_clk(c);
	u32 vote_regval;

	/* Is the branch voted on by apps? */
	vote_regval = readl_relaxed(VOTE_REG(vclk));
	if (!(vote_regval & vclk->en_mask))
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

struct clk_ops clk_ops_empty;

struct clk_ops clk_ops_rcg = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_clk_handoff,
	.get_parent = rcg_clk_get_parent,
};

struct clk_ops clk_ops_rcg_mnd = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_mnd_clk_handoff,
	.get_parent = rcg_mnd_clk_get_parent,
};

struct clk_ops clk_ops_branch = {
	.enable = branch_clk_enable,
	.disable = branch_clk_disable,
	.set_rate = branch_clk_set_rate,
	.get_rate = branch_clk_get_rate,
	.list_rate = branch_clk_list_rate,
	.round_rate = branch_clk_round_rate,
	.reset = branch_clk_reset,
	.handoff = branch_clk_handoff,
};

struct clk_ops clk_ops_vote = {
	.enable = local_vote_clk_enable,
	.disable = local_vote_clk_disable,
	.reset = local_vote_clk_reset,
	.handoff = local_vote_clk_handoff,
};
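
/*
 * Example of how a clock is wired to these ops in the platform clock
 * tables (a sketch modeled on the 8974 GCC tables; the register offset
 * macro and base array are illustrative):
 *
 *	static struct branch_clk gcc_blsp1_ahb_clk = {
 *		.cbcr_reg = BLSP1_AHB_CBCR,
 *		.has_sibling = 1,
 *		.base = &virt_bases[GCC_BASE],
 *		.c = {
 *			.dbg_name = "gcc_blsp1_ahb_clk",
 *			.ops = &clk_ops_branch,
 *			CLK_INIT(gcc_blsp1_ahb_clk.c),
 *		},
 *	};
 */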