blob: 2879b49b23cafd92867db5c9605684ec9d3c250c [file] [log] [blame]
Saravana Kannanc85ecf92013-01-21 17:58:35 -08001/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Vikram Mulukutla8810e342011-10-20 20:26:53 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#define pr_fmt(fmt) "%s: " fmt, __func__
15
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/err.h>
19#include <linux/ctype.h>
20#include <linux/bitops.h>
21#include <linux/io.h>
22#include <linux/spinlock.h>
23#include <linux/delay.h>
24#include <linux/clk.h>
25
26#include <mach/clk.h>
Matt Wagantall33d01f52012-02-23 23:27:44 -080027#include <mach/clk-provider.h>
Vikram Mulukutla8810e342011-10-20 20:26:53 -070028
Vikram Mulukutla8810e342011-10-20 20:26:53 -070029#include "clock-local2.h"
30
31/*
32 * When enabling/disabling a clock, check the halt bit up to this number
33 * number of times (with a 1 us delay in between) before continuing.
34 */
35#define HALT_CHECK_MAX_LOOPS 200
36/* For clock without halt checking, wait this long after enables/disables. */
37#define HALT_CHECK_DELAY_US 10
38
39/*
40 * When updating an RCG configuration, check the update bit up to this number
41 * number of times (with a 1 us delay in between) before continuing.
42 */
43#define UPDATE_CHECK_MAX_LOOPS 200
44
45DEFINE_SPINLOCK(local_clock_reg_lock);
46struct clk_freq_tbl rcg_dummy_freq = F_END;
47
48#define CMD_RCGR_REG(x) (*(x)->base + (x)->cmd_rcgr_reg)
49#define CFG_RCGR_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0x4)
50#define M_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0x8)
51#define N_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0xC)
52#define D_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0x10)
53#define CBCR_REG(x) (*(x)->base + (x)->cbcr_reg)
54#define BCR_REG(x) (*(x)->base + (x)->bcr_reg)
55#define VOTE_REG(x) (*(x)->base + (x)->vote_reg)
56
57/*
58 * Important clock bit positions and masks
59 */
60#define CMD_RCGR_ROOT_ENABLE_BIT BIT(1)
61#define CBCR_BRANCH_ENABLE_BIT BIT(0)
62#define CBCR_BRANCH_OFF_BIT BIT(31)
63#define CMD_RCGR_CONFIG_UPDATE_BIT BIT(0)
64#define CMD_RCGR_ROOT_STATUS_BIT BIT(31)
65#define BCR_BLK_ARES_BIT BIT(0)
66#define CBCR_HW_CTL_BIT BIT(1)
67#define CFG_RCGR_DIV_MASK BM(4, 0)
68#define CFG_RCGR_SRC_SEL_MASK BM(10, 8)
69#define MND_MODE_MASK BM(13, 12)
70#define MND_DUAL_EDGE_MODE_BVAL BVAL(13, 12, 0x2)
71#define CMD_RCGR_CONFIG_DIRTY_MASK BM(7, 4)
Saravana Kannanc85ecf92013-01-21 17:58:35 -080072#define CBCR_CDIV_LSB 16
73#define CBCR_CDIV_MSB 24
Vikram Mulukutla8810e342011-10-20 20:26:53 -070074
75enum branch_state {
76 BRANCH_ON,
77 BRANCH_OFF,
78};
79
80/*
81 * RCG functions
82 */
83
84/*
85 * Update an RCG with a new configuration. This may include a new M, N, or D
86 * value, source selection or pre-divider value.
87 *
88 */
89static void rcg_update_config(struct rcg_clk *rcg)
90{
91 u32 cmd_rcgr_regval, count;
92
93 cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
94 cmd_rcgr_regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
95 writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));
96
97 /* Wait for update to take effect */
98 for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
99 if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
100 CMD_RCGR_CONFIG_UPDATE_BIT))
101 return;
102 udelay(1);
103 }
104
105 WARN(count == 0, "%s: rcg didn't update its configuration.",
106 rcg->c.dbg_name);
107}
108
109/* RCG set rate function for clocks with Half Integer Dividers. */
110void set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
111{
112 u32 cfg_regval;
Vikram Mulukutla8c260552012-08-20 22:23:02 -0700113 unsigned long flags;
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700114
Vikram Mulukutla8c260552012-08-20 22:23:02 -0700115 spin_lock_irqsave(&local_clock_reg_lock, flags);
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700116 cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
117 cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
118 cfg_regval |= nf->div_src_val;
119 writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));
120
121 rcg_update_config(rcg);
Vikram Mulukutla8c260552012-08-20 22:23:02 -0700122 spin_unlock_irqrestore(&local_clock_reg_lock, flags);
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700123}
124
125/* RCG set rate function for clocks with MND & Half Integer Dividers. */
126void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
127{
128 u32 cfg_regval;
Vikram Mulukutla8c260552012-08-20 22:23:02 -0700129 unsigned long flags;
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700130
Vikram Mulukutla8c260552012-08-20 22:23:02 -0700131 spin_lock_irqsave(&local_clock_reg_lock, flags);
132 cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700133 writel_relaxed(nf->m_val, M_REG(rcg));
134 writel_relaxed(nf->n_val, N_REG(rcg));
135 writel_relaxed(nf->d_val, D_REG(rcg));
136
137 cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
138 cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
139 cfg_regval |= nf->div_src_val;
140
141 /* Activate or disable the M/N:D divider as necessary */
142 cfg_regval &= ~MND_MODE_MASK;
143 if (nf->n_val != 0)
144 cfg_regval |= MND_DUAL_EDGE_MODE_BVAL;
145 writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));
146
147 rcg_update_config(rcg);
Vikram Mulukutla8c260552012-08-20 22:23:02 -0700148 spin_unlock_irqrestore(&local_clock_reg_lock, flags);
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700149}
150
Stephen Boydd86d1f22012-01-24 17:36:34 -0800151static int rcg_clk_prepare(struct clk *c)
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700152{
153 struct rcg_clk *rcg = to_rcg_clk(c);
154
155 WARN(rcg->current_freq == &rcg_dummy_freq,
Stephen Boydd86d1f22012-01-24 17:36:34 -0800156 "Attempting to prepare %s before setting its rate. "
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700157 "Set the rate first!\n", rcg->c.dbg_name);
158
159 return 0;
160}
161
/*
 * Switch an RCG to a new table-backed frequency.
 *
 * Looks @rate up in the RCG's frequency table, enables/prepares the new
 * source clock to mirror this clock's current prepare/enable state, asks
 * the RCG-specific set_rate op to reprogram the hardware, and then drops
 * the matching references on the old source. Returns -EINVAL if the rate
 * is not in the table, or the error from preparing/enabling the new source.
 *
 * NOTE(review): c->lock is held across the hardware switch so the
 * enable state cannot change mid-swap; the prepare/unprepare calls happen
 * outside the spinlock because they may sleep — presumably c->prepare_count
 * is stable here because the framework serializes set_rate with
 * prepare/unprepare; confirm against the caller.
 */
static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct clk_freq_tbl *cf, *nf;
	struct rcg_clk *rcg = to_rcg_clk(c);
	int rc;
	unsigned long flags;

	/* Find the requested rate in the table; FREQ_END terminates it. */
	for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	/* Remember the outgoing frequency so its source can be released. */
	cf = rcg->current_freq;

	/* Enable source clock dependency for the new freq. */
	if (c->prepare_count) {
		rc = clk_prepare(nf->src_clk);
		if (rc)
			return rc;
	}

	spin_lock_irqsave(&c->lock, flags);
	if (c->count) {
		rc = clk_enable(nf->src_clk);
		if (rc) {
			/* Undo the prepare taken above before bailing out. */
			spin_unlock_irqrestore(&c->lock, flags);
			clk_unprepare(nf->src_clk);
			return rc;
		}
	}

	/* An RCG without a set_rate op cannot function; fail loudly. */
	BUG_ON(!rcg->set_rate);

	/* Perform clock-specific frequency switch operations. */
	rcg->set_rate(rcg, nf);

	/* Release source requirements of the old freq. */
	if (c->count)
		clk_disable(cf->src_clk);
	spin_unlock_irqrestore(&c->lock, flags);

	if (c->prepare_count)
		clk_unprepare(cf->src_clk);

	/* Commit the bookkeeping: new frequency and new parent. */
	rcg->current_freq = nf;
	c->parent = nf->src_clk;

	return 0;
}
213
Tianyi Gou43215f372013-03-15 12:01:30 -0700214/*
215 * Return a supported rate that's at least the specified rate or
216 * the max supported rate if the specified rate is larger than the
217 * max supported rate.
218 */
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700219static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
220{
221 struct rcg_clk *rcg = to_rcg_clk(c);
222 struct clk_freq_tbl *f;
223
224 for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
225 if (f->freq_hz >= rate)
226 return f->freq_hz;
227
Tianyi Gou43215f372013-03-15 12:01:30 -0700228 f--;
229 return f->freq_hz;
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700230}
231
232/* Return the nth supported frequency for a given clock. */
233static int rcg_clk_list_rate(struct clk *c, unsigned n)
234{
235 struct rcg_clk *rcg = to_rcg_clk(c);
236
237 if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
238 return -ENXIO;
239
240 return (rcg->freq_tbl + n)->freq_hz;
241}
242
/*
 * Work out which source clock an already-running RCG is using by decoding
 * its hardware registers and matching them against the frequency table.
 *
 * @rcg:     the RCG to inspect.
 * @has_mnd: non-zero if this RCG has an M/N:D counter block.
 *
 * Returns the matching table entry's source clock (and caches the entry in
 * rcg->current_freq), or NULL when the configuration is dirty (an update is
 * still pending) or no table entry matches the registers.
 */
static struct clk *_rcg_clk_get_parent(struct rcg_clk *rcg, int has_mnd)
{
	u32 n_regval = 0, m_regval = 0, d_regval = 0;
	u32 cfg_regval;
	struct clk_freq_tbl *freq;
	u32 cmd_rcgr_regval;

	/* Is there a pending configuration? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
		return NULL;

	/* Get values of m, n, d, div and src_sel registers. */
	if (has_mnd) {
		m_regval = readl_relaxed(M_REG(rcg));
		n_regval = readl_relaxed(N_REG(rcg));
		d_regval = readl_relaxed(D_REG(rcg));

		/*
		 * The n and d values stored in the frequency tables are sign
		 * extended to 32 bits. The n and d values in the registers are
		 * sign extended to 8 or 16 bits. Sign extend the values read
		 * from the registers so that they can be compared to the
		 * values in the frequency tables.
		 */
		n_regval |= (n_regval >> 8) ? BM(31, 16) : BM(31, 8);
		d_regval |= (d_regval >> 8) ? BM(31, 16) : BM(31, 8);
	}

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= CFG_RCGR_SRC_SEL_MASK | CFG_RCGR_DIV_MASK
				| MND_MODE_MASK;

	/* If mnd counter is present, check if it's in use. */
	has_mnd = (has_mnd) &&
		((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL);

	/*
	 * Clear out the mn counter mode bits since we now want to compare only
	 * the source mux selection and pre-divider values in the registers.
	 */
	cfg_regval &= ~MND_MODE_MASK;

	/* Figure out what rate the rcg is running at */
	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
		if (freq->div_src_val != cfg_regval)
			continue;
		/* Only require M/N/D to match when the counter is active. */
		if (has_mnd) {
			if (freq->m_val != m_regval)
				continue;
			if (freq->n_val != n_regval)
				continue;
			if (freq->d_val != d_regval)
				continue;
		}
		break;
	}

	/* No known frequency found */
	if (freq->freq_hz == FREQ_END)
		return NULL;

	rcg->current_freq = freq;
	return freq->src_clk;
}
308
309static enum handoff _rcg_clk_handoff(struct rcg_clk *rcg)
310{
311 u32 cmd_rcgr_regval;
312
313 if (rcg->current_freq && rcg->current_freq->freq_hz != FREQ_END)
314 rcg->c.rate = rcg->current_freq->freq_hz;
315
316 /* Is the root enabled? */
317 cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
318 if ((cmd_rcgr_regval & CMD_RCGR_ROOT_STATUS_BIT))
319 return HANDOFF_DISABLED_CLK;
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700320
321 return HANDOFF_ENABLED_CLK;
322}
323
/* get_parent callback for RCGs that have an M/N:D counter block. */
static struct clk *rcg_mnd_clk_get_parent(struct clk *c)
{
	return _rcg_clk_get_parent(to_rcg_clk(c), 1);
}
328
/* get_parent callback for RCGs without an M/N:D counter block. */
static struct clk *rcg_clk_get_parent(struct clk *c)
{
	return _rcg_clk_get_parent(to_rcg_clk(c), 0);
}
333
/* handoff callback for MND-equipped RCGs; shares the common helper. */
static enum handoff rcg_mnd_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c));
}
338
/* handoff callback for plain (HID-only) RCGs; shares the common helper. */
static enum handoff rcg_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c));
}
343
Vikram Mulukutla4be69082012-06-28 18:40:34 -0700344#define BRANCH_CHECK_MASK BM(31, 28)
345#define BRANCH_ON_VAL BVAL(31, 28, 0x0)
346#define BRANCH_OFF_VAL BVAL(31, 28, 0x8)
347#define BRANCH_NOC_FSM_ON_VAL BVAL(31, 28, 0x2)
348
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700349/*
350 * Branch clock functions
351 */
352static void branch_clk_halt_check(u32 halt_check, const char *clk_name,
353 void __iomem *cbcr_reg,
354 enum branch_state br_status)
355{
Vikram Mulukutla86b9fa62012-05-02 16:39:14 -0700356 char *status_str = (br_status == BRANCH_ON) ? "off" : "on";
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700357
358 /*
359 * Use a memory barrier since some halt status registers are
360 * not within the same 1K segment as the branch/root enable
361 * registers. It's also needed in the udelay() case to ensure
362 * the delay starts after the branch disable.
363 */
364 mb();
365
366 if (halt_check == DELAY || halt_check == HALT_VOTED) {
367 udelay(HALT_CHECK_DELAY_US);
368 } else if (halt_check == HALT) {
369 int count;
Vikram Mulukutla4be69082012-06-28 18:40:34 -0700370 u32 val;
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700371 for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
Vikram Mulukutla4be69082012-06-28 18:40:34 -0700372 val = readl_relaxed(cbcr_reg);
373 val &= BRANCH_CHECK_MASK;
374 switch (br_status) {
375 case BRANCH_ON:
376 if (val == BRANCH_ON_VAL
377 || val == BRANCH_NOC_FSM_ON_VAL)
378 return;
379 break;
380
381 case BRANCH_OFF:
382 if (val == BRANCH_OFF_VAL)
383 return;
384 break;
385 };
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700386 udelay(1);
387 }
388 WARN(count == 0, "%s status stuck %s", clk_name, status_str);
389 }
390}
391
392static int branch_clk_enable(struct clk *c)
393{
394 unsigned long flags;
395 u32 cbcr_val;
396 struct branch_clk *branch = to_branch_clk(c);
397
398 spin_lock_irqsave(&local_clock_reg_lock, flags);
399 cbcr_val = readl_relaxed(CBCR_REG(branch));
400 cbcr_val |= CBCR_BRANCH_ENABLE_BIT;
401 writel_relaxed(cbcr_val, CBCR_REG(branch));
402 spin_unlock_irqrestore(&local_clock_reg_lock, flags);
403
404 /* Wait for clock to enable before continuing. */
405 branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
406 CBCR_REG(branch), BRANCH_ON);
407
408 return 0;
409}
410
411static void branch_clk_disable(struct clk *c)
412{
413 unsigned long flags;
414 struct branch_clk *branch = to_branch_clk(c);
415 u32 reg_val;
416
417 spin_lock_irqsave(&local_clock_reg_lock, flags);
418 reg_val = readl_relaxed(CBCR_REG(branch));
419 reg_val &= ~CBCR_BRANCH_ENABLE_BIT;
420 writel_relaxed(reg_val, CBCR_REG(branch));
421 spin_unlock_irqrestore(&local_clock_reg_lock, flags);
422
423 /* Wait for clock to disable before continuing. */
424 branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
425 CBCR_REG(branch), BRANCH_OFF);
426}
427
428static int branch_cdiv_set_rate(struct branch_clk *branch, unsigned long rate)
429{
430 unsigned long flags;
431 u32 regval;
432
433 if (rate > branch->max_div)
434 return -EINVAL;
435
436 spin_lock_irqsave(&local_clock_reg_lock, flags);
437 regval = readl_relaxed(CBCR_REG(branch));
Saravana Kannanc85ecf92013-01-21 17:58:35 -0800438 regval &= ~BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
439 regval |= BVAL(CBCR_CDIV_MSB, CBCR_CDIV_LSB, rate);
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700440 writel_relaxed(regval, CBCR_REG(branch));
441 spin_unlock_irqrestore(&local_clock_reg_lock, flags);
442
443 return 0;
444}
445
446static int branch_clk_set_rate(struct clk *c, unsigned long rate)
447{
448 struct branch_clk *branch = to_branch_clk(c);
449
450 if (branch->max_div)
451 return branch_cdiv_set_rate(branch, rate);
452
453 if (!branch->has_sibling)
Saravana Kannan7a6532e2012-10-18 20:51:13 -0700454 return clk_set_rate(c->parent, rate);
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700455
456 return -EPERM;
457}
458
Vikram Mulukutla5c3252d2012-05-25 12:28:54 -0700459static long branch_clk_round_rate(struct clk *c, unsigned long rate)
460{
461 struct branch_clk *branch = to_branch_clk(c);
462
463 if (branch->max_div)
464 return rate <= (branch->max_div) ? rate : -EPERM;
465
466 if (!branch->has_sibling)
Saravana Kannan7a6532e2012-10-18 20:51:13 -0700467 return clk_round_rate(c->parent, rate);
Vikram Mulukutla5c3252d2012-05-25 12:28:54 -0700468
469 return -EPERM;
470}
471
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700472static unsigned long branch_clk_get_rate(struct clk *c)
473{
474 struct branch_clk *branch = to_branch_clk(c);
475
476 if (branch->max_div)
477 return branch->c.rate;
478
479 if (!branch->has_sibling)
Saravana Kannan7a6532e2012-10-18 20:51:13 -0700480 return clk_get_rate(c->parent);
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700481
482 return 0;
483}
484
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700485static int branch_clk_list_rate(struct clk *c, unsigned n)
486{
Patrick Daly13e22ed2012-10-11 14:31:11 -0700487 int level, fmax = 0, rate;
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700488 struct branch_clk *branch = to_branch_clk(c);
Patrick Daly13e22ed2012-10-11 14:31:11 -0700489 struct clk *parent = c->parent;
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700490
491 if (branch->has_sibling == 1)
492 return -ENXIO;
493
Patrick Daly13e22ed2012-10-11 14:31:11 -0700494 if (!parent || !parent->ops->list_rate)
495 return -ENXIO;
496
497 /* Find max frequency supported within voltage constraints. */
498 if (!parent->vdd_class) {
499 fmax = INT_MAX;
500 } else {
501 for (level = 0; level < parent->num_fmax; level++)
502 if (parent->fmax[level])
503 fmax = parent->fmax[level];
504 }
505
506 rate = parent->ops->list_rate(parent, n);
507 if (rate <= fmax)
508 return rate;
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700509 else
Vikram Mulukutlae0589fc2012-09-20 12:19:16 -0700510 return -ENXIO;
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700511}
512
513static enum handoff branch_clk_handoff(struct clk *c)
514{
515 struct branch_clk *branch = to_branch_clk(c);
516 u32 cbcr_regval;
517
518 cbcr_regval = readl_relaxed(CBCR_REG(branch));
519 if ((cbcr_regval & CBCR_BRANCH_OFF_BIT))
520 return HANDOFF_DISABLED_CLK;
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700521
Saravana Kannanc85ecf92013-01-21 17:58:35 -0800522 if (branch->max_div) {
523 cbcr_regval &= BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
524 cbcr_regval >>= CBCR_CDIV_LSB;
525 c->rate = cbcr_regval;
526 } else if (!branch->has_sibling) {
527 c->rate = clk_get_rate(c->parent);
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700528 }
529
530 return HANDOFF_ENABLED_CLK;
531}
532
533static int __branch_clk_reset(void __iomem *bcr_reg,
Vikram Mulukutla21c6b912012-06-14 14:18:45 -0700534 enum clk_reset_action action)
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700535{
536 int ret = 0;
537 unsigned long flags;
538 u32 reg_val;
539
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700540 spin_lock_irqsave(&local_clock_reg_lock, flags);
541 reg_val = readl_relaxed(bcr_reg);
542 switch (action) {
543 case CLK_RESET_ASSERT:
544 reg_val |= BCR_BLK_ARES_BIT;
545 break;
546 case CLK_RESET_DEASSERT:
547 reg_val &= ~BCR_BLK_ARES_BIT;
548 break;
549 default:
550 ret = -EINVAL;
551 }
552 writel_relaxed(reg_val, bcr_reg);
553 spin_unlock_irqrestore(&local_clock_reg_lock, flags);
554
555 /* Make sure write is issued before returning. */
556 mb();
557
558 return ret;
559}
560
561static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
562{
563 struct branch_clk *branch = to_branch_clk(c);
Vikram Mulukutla21c6b912012-06-14 14:18:45 -0700564
565 if (!branch->bcr_reg) {
566 WARN("clk_reset called on an unsupported clock (%s)\n",
567 c->dbg_name);
568 return -EPERM;
569 }
570 return __branch_clk_reset(BCR_REG(branch), action);
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700571}
572
573/*
574 * Voteable clock functions
575 */
576static int local_vote_clk_reset(struct clk *c, enum clk_reset_action action)
577{
Vikram Mulukutla27784c02012-06-06 13:37:36 -0700578 struct local_vote_clk *vclk = to_local_vote_clk(c);
Vikram Mulukutla21c6b912012-06-14 14:18:45 -0700579
580 if (!vclk->bcr_reg) {
581 WARN("clk_reset called on an unsupported clock (%s)\n",
582 c->dbg_name);
583 return -EPERM;
584 }
585 return __branch_clk_reset(BCR_REG(vclk), action);
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700586}
587
588static int local_vote_clk_enable(struct clk *c)
589{
590 unsigned long flags;
591 u32 ena;
592 struct local_vote_clk *vclk = to_local_vote_clk(c);
593
594 spin_lock_irqsave(&local_clock_reg_lock, flags);
595 ena = readl_relaxed(VOTE_REG(vclk));
596 ena |= vclk->en_mask;
597 writel_relaxed(ena, VOTE_REG(vclk));
598 spin_unlock_irqrestore(&local_clock_reg_lock, flags);
599
600 branch_clk_halt_check(vclk->halt_check, c->dbg_name, CBCR_REG(vclk),
601 BRANCH_ON);
602
603 return 0;
604}
605
606static void local_vote_clk_disable(struct clk *c)
607{
608 unsigned long flags;
609 u32 ena;
610 struct local_vote_clk *vclk = to_local_vote_clk(c);
611
612 spin_lock_irqsave(&local_clock_reg_lock, flags);
613 ena = readl_relaxed(VOTE_REG(vclk));
614 ena &= ~vclk->en_mask;
615 writel_relaxed(ena, VOTE_REG(vclk));
616 spin_unlock_irqrestore(&local_clock_reg_lock, flags);
617}
618
619static enum handoff local_vote_clk_handoff(struct clk *c)
620{
621 struct local_vote_clk *vclk = to_local_vote_clk(c);
622 u32 vote_regval;
623
624 /* Is the branch voted on by apps? */
625 vote_regval = readl_relaxed(VOTE_REG(vclk));
626 if (!(vote_regval & vclk->en_mask))
627 return HANDOFF_DISABLED_CLK;
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700628
629 return HANDOFF_ENABLED_CLK;
630}
631
Patrick Dalyadeeb472013-03-06 21:22:32 -0800632static enum handoff byte_rcg_handoff(struct clk *clk)
633{
634 struct rcg_clk *rcg = to_rcg_clk(clk);
635 u32 div_val;
636 unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);
637
638 /* If the pre-divider is used, find the rate after the division */
639 div_val = readl_relaxed(CFG_RCGR_REG(rcg)) & CFG_RCGR_DIV_MASK;
640 if (div_val > 1)
641 pre_div_rate = parent_rate / ((div_val + 1) >> 1);
642 else
643 pre_div_rate = parent_rate;
644
645 clk->rate = pre_div_rate;
646
647 if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
648 return HANDOFF_DISABLED_CLK;
649
650 return HANDOFF_ENABLED_CLK;
651}
652
653static int set_rate_byte(struct clk *clk, unsigned long rate)
654{
655 struct rcg_clk *rcg = to_rcg_clk(clk);
656 struct clk *pll = clk->parent;
657 unsigned long source_rate, div;
658 struct clk_freq_tbl *byte_freq = rcg->current_freq;
659 int rc;
660
661 if (rate == 0)
662 return -EINVAL;
663
664 rc = clk_set_rate(pll, rate);
665 if (rc)
666 return rc;
667
668 source_rate = clk_round_rate(pll, rate);
669 if ((2 * source_rate) % rate)
670 return -EINVAL;
671
672 div = ((2 * source_rate)/rate) - 1;
673 if (div > CFG_RCGR_DIV_MASK)
674 return -EINVAL;
675
676 byte_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
677 byte_freq->div_src_val |= BVAL(4, 0, div);
678 set_rate_hid(rcg, byte_freq);
679
680 return 0;
681}
682
683static enum handoff pixel_rcg_handoff(struct clk *clk)
684{
685 struct rcg_clk *rcg = to_rcg_clk(clk);
686 u32 div_val, mval, nval, cfg_regval;
687 unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);
688
689 cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
690
691 /* If the pre-divider is used, find the rate after the division */
692 div_val = cfg_regval & CFG_RCGR_DIV_MASK;
693 if (div_val > 1)
694 pre_div_rate = parent_rate / ((div_val + 1) >> 1);
695 else
696 pre_div_rate = parent_rate;
697
698 clk->rate = pre_div_rate;
699
700 /* If MND is used, find the rate after the MND division */
701 if ((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL) {
702 mval = readl_relaxed(M_REG(rcg));
703 nval = readl_relaxed(N_REG(rcg));
704 if (!nval)
705 return HANDOFF_DISABLED_CLK;
706 nval = (~nval) + mval;
707 clk->rate = (pre_div_rate * mval) / nval;
708 }
709
710 if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
711 return HANDOFF_DISABLED_CLK;
712
713 return HANDOFF_ENABLED_CLK;
714}
715
716static int set_rate_pixel(struct clk *clk, unsigned long rate)
717{
718 struct rcg_clk *rcg = to_rcg_clk(clk);
719 struct clk *pll = clk->parent;
720 unsigned long source_rate, div;
721 struct clk_freq_tbl *pixel_freq = rcg->current_freq;
722 int rc;
723
724 if (rate == 0)
725 return -EINVAL;
726
727 rc = clk_set_rate(pll, rate);
728 if (rc)
729 return rc;
730
731 source_rate = clk_round_rate(pll, rate);
732 if ((2 * source_rate) % rate)
733 return -EINVAL;
734
735 div = ((2 * source_rate)/rate) - 1;
736 if (div > CFG_RCGR_DIV_MASK)
737 return -EINVAL;
738
739 pixel_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
740 pixel_freq->div_src_val |= BVAL(4, 0, div);
741 set_rate_mnd(rcg, pixel_freq);
742
743 return 0;
744}
745
746/*
747 * Unlike other clocks, the HDMI rate is adjusted through PLL
748 * re-programming. It is also routed through an HID divider.
749 */
750static int rcg_clk_set_rate_hdmi(struct clk *c, unsigned long rate)
751{
752 struct clk_freq_tbl *nf;
753 struct rcg_clk *rcg = to_rcg_clk(c);
754 int rc;
755
756 for (nf = rcg->freq_tbl; nf->freq_hz != rate; nf++)
757 if (nf->freq_hz == FREQ_END) {
758 rc = -EINVAL;
759 goto out;
760 }
761
762 rc = clk_set_rate(nf->src_clk, rate);
763 if (rc < 0)
764 goto out;
765 set_rate_hid(rcg, nf);
766
767 rcg->current_freq = nf;
768 c->parent = nf->src_clk;
769out:
770 return rc;
771}
772
773
/* Ops table with no callbacks, for clocks that need none. */
struct clk_ops clk_ops_empty;

/* RCGs with a half-integer divider only. */
struct clk_ops clk_ops_rcg = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_clk_handoff,
	.get_parent = rcg_clk_get_parent,
};

/* RCGs with an M/N:D counter in addition to the half-integer divider. */
struct clk_ops clk_ops_rcg_mnd = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_mnd_clk_handoff,
	.get_parent = rcg_mnd_clk_get_parent,
};

/* Display pixel-clock RCGs (rate driven via the parent PLL + MND). */
struct clk_ops clk_ops_pixel = {
	.enable = rcg_clk_prepare,
	.set_rate = set_rate_pixel,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = pixel_rcg_handoff,
};

/* Display byte-clock RCGs (rate driven via the parent PLL + HID). */
struct clk_ops clk_ops_byte = {
	.enable = rcg_clk_prepare,
	.set_rate = set_rate_byte,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = byte_rcg_handoff,
};

/* HDMI RCGs (rate set through PLL re-programming). */
struct clk_ops clk_ops_rcg_hdmi = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate_hdmi,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_clk_handoff,
	.get_parent = rcg_clk_get_parent,
};

/* Branch clocks (CBCR-controlled gates, optional local divider). */
struct clk_ops clk_ops_branch = {
	.enable = branch_clk_enable,
	.disable = branch_clk_disable,
	.set_rate = branch_clk_set_rate,
	.get_rate = branch_clk_get_rate,
	.list_rate = branch_clk_list_rate,
	.round_rate = branch_clk_round_rate,
	.reset = branch_clk_reset,
	.handoff = branch_clk_handoff,
};

/* Voteable clocks (shared enables voted on by multiple masters). */
struct clk_ops clk_ops_vote = {
	.enable = local_vote_clk_enable,
	.disable = local_vote_clk_disable,
	.reset = local_vote_clk_reset,
	.handoff = local_vote_clk_handoff,
};