/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <mach/clk.h>
#include <mach/clk-provider.h>

#include "clock-local2.h"

/*
 * When enabling/disabling a clock, check the halt bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define HALT_CHECK_MAX_LOOPS	500
/* For clocks without halt checking, wait this long after enables/disables. */
#define HALT_CHECK_DELAY_US	10

/*
 * When updating an RCG configuration, check the update bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define UPDATE_CHECK_MAX_LOOPS	500

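/*
 * Note: with a 1 us delay per iteration, the polling loops bounded by the
 * two MAX_LOOPS values above busy-wait for at most roughly 500 us before
 * the code warns and gives up.
 */
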
DEFINE_SPINLOCK(local_clock_reg_lock);
struct clk_freq_tbl rcg_dummy_freq = F_END;

#define CMD_RCGR_REG(x)	(*(x)->base + (x)->cmd_rcgr_reg)
#define CFG_RCGR_REG(x)	(*(x)->base + (x)->cmd_rcgr_reg + 0x4)
#define M_REG(x)	(*(x)->base + (x)->cmd_rcgr_reg + 0x8)
#define N_REG(x)	(*(x)->base + (x)->cmd_rcgr_reg + 0xC)
#define D_REG(x)	(*(x)->base + (x)->cmd_rcgr_reg + 0x10)
#define CBCR_REG(x)	(*(x)->base + (x)->cbcr_reg)
#define BCR_REG(x)	(*(x)->base + (x)->bcr_reg)
#define VOTE_REG(x)	(*(x)->base + (x)->vote_reg)

/*
 * Important clock bit positions and masks
 */
#define CMD_RCGR_ROOT_ENABLE_BIT	BIT(1)
#define CBCR_BRANCH_ENABLE_BIT		BIT(0)
#define CBCR_BRANCH_OFF_BIT		BIT(31)
#define CMD_RCGR_CONFIG_UPDATE_BIT	BIT(0)
#define CMD_RCGR_ROOT_STATUS_BIT	BIT(31)
#define BCR_BLK_ARES_BIT		BIT(0)
#define CBCR_HW_CTL_BIT			BIT(1)
#define CFG_RCGR_DIV_MASK		BM(4, 0)
#define CFG_RCGR_SRC_SEL_MASK		BM(10, 8)
#define MND_MODE_MASK			BM(13, 12)
#define MND_DUAL_EDGE_MODE_BVAL		BVAL(13, 12, 0x2)
#define CMD_RCGR_CONFIG_DIRTY_MASK	BM(7, 4)
#define CBCR_CDIV_LSB			16
#define CBCR_CDIV_MSB			24

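/*
 * For reference: BM(msb, lsb), defined in this tree's <mach/clk-provider.h>,
 * builds a mask covering bits msb..lsb, and BVAL(msb, lsb, val) shifts val
 * into that field. For example, CFG_RCGR_DIV_MASK = BM(4, 0) = 0x1f, and
 * MND_DUAL_EDGE_MODE_BVAL = BVAL(13, 12, 0x2) = 0x2000.
 */
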
enum branch_state {
	BRANCH_ON,
	BRANCH_OFF,
};

/*
 * RCG functions
 */

/*
 * Update an RCG with a new configuration. This may include a new M, N, or D
 * value, source selection or pre-divider value.
 */
static void rcg_update_config(struct rcg_clk *rcg)
{
	u32 cmd_rcgr_regval, count;

	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	cmd_rcgr_regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));

	/* Wait for update to take effect */
	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
		if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
				CMD_RCGR_CONFIG_UPDATE_BIT))
			return;
		udelay(1);
	}

	WARN(count == 0, "%s: rcg didn't update its configuration.",
		rcg->c.dbg_name);
}

/* RCG set rate function for clocks with Half Integer Dividers. */
void set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	u32 cfg_regval;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
	cfg_regval |= nf->div_src_val;
	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));

	rcg_update_config(rcg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

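/*
 * A worked example of the half-integer divider (HID) encoding, inferred
 * from the handoff math later in this file (pre_div_rate = parent_rate /
 * ((div_val + 1) >> 1)): the 5-bit CFG divider field holds
 * (2 * divider - 1), so a field value of 1 is divide-by-1, 2 is
 * divide-by-1.5 and 3 is divide-by-2.
 */
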
/* RCG set rate function for clocks with MND & Half Integer Dividers. */
void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	u32 cfg_regval;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	writel_relaxed(nf->m_val, M_REG(rcg));
	writel_relaxed(nf->n_val, N_REG(rcg));
	writel_relaxed(nf->d_val, D_REG(rcg));

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
	cfg_regval |= nf->div_src_val;

	/* Activate or disable the M/N:D divider as necessary */
	cfg_regval &= ~MND_MODE_MASK;
	if (nf->n_val != 0)
		cfg_regval |= MND_DUAL_EDGE_MODE_BVAL;
	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));

	rcg_update_config(rcg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

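/*
 * A note on the M/N:D register convention this file assumes (see
 * pixel_rcg_handoff(), which recovers N as (~n_regval) + m_regval): for an
 * M/N ratio the frequency tables store m_val = M and n_val = ~(N - M), so
 * e.g. M = 1, N = 4 is programmed as m_val = 1, n_val = ~3. In dual-edge
 * mode the output rate is then the pre-divider rate * M / N.
 */
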
static int rcg_clk_prepare(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	WARN(rcg->current_freq == &rcg_dummy_freq,
		"Attempting to prepare %s before setting its rate. "
		"Set the rate first!\n", rcg->c.dbg_name);

	return 0;
}

static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct clk_freq_tbl *cf, *nf;
	struct rcg_clk *rcg = to_rcg_clk(c);
	int rc;
	unsigned long flags;

	for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	cf = rcg->current_freq;

	/* Enable source clock dependency for the new freq. */
	if (c->prepare_count) {
		rc = clk_prepare(nf->src_clk);
		if (rc)
			return rc;
	}

	spin_lock_irqsave(&c->lock, flags);
	if (c->count) {
		rc = clk_enable(nf->src_clk);
		if (rc) {
			spin_unlock_irqrestore(&c->lock, flags);
			clk_unprepare(nf->src_clk);
			return rc;
		}
	}

	BUG_ON(!rcg->set_rate);

	/* Perform clock-specific frequency switch operations. */
	rcg->set_rate(rcg, nf);

	/* Release source requirements of the old freq. */
	if (c->count)
		clk_disable(cf->src_clk);
	spin_unlock_irqrestore(&c->lock, flags);

	if (c->prepare_count)
		clk_unprepare(cf->src_clk);

	rcg->current_freq = nf;
	c->parent = nf->src_clk;

	return 0;
}

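/*
 * For illustration only: a hypothetical frequency table consumed by
 * rcg_clk_set_rate() above, in the F() initializer style used by the msm
 * clock files that include clock-local2.h (exact fields, rates and source
 * names are SoC-specific assumptions):
 *
 *	static struct clk_freq_tbl ftbl_example_clk[] = {
 *		F( 19200000,   cxo, 1, 0, 0),
 *		F( 50000000, gpll0, 12, 0, 0),
 *		F_END
 *	};
 *
 * The set-rate path walks the table until freq_hz matches the request, so
 * only rates listed before the F_END terminator are accepted.
 */
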
/*
 * Return the lowest supported rate that is at least the requested rate,
 * or the maximum supported rate if the request exceeds it.
 */
static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk_freq_tbl *f;

	for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
		if (f->freq_hz >= rate)
			return f->freq_hz;

	f--;
	return f->freq_hz;
}

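/*
 * Example (assuming an ascending frequency table, as the tables used with
 * this driver are): with supported rates {19.2, 50, 100} MHz, a request
 * for 60 MHz rounds up to 100 MHz, and a request for 150 MHz returns the
 * 100 MHz maximum.
 */
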
/* Return the nth supported frequency for a given clock. */
static int rcg_clk_list_rate(struct clk *c, unsigned n)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
		return -ENXIO;

	return (rcg->freq_tbl + n)->freq_hz;
}

static struct clk *_rcg_clk_get_parent(struct rcg_clk *rcg, int has_mnd)
{
	u32 n_regval = 0, m_regval = 0, d_regval = 0;
	u32 cfg_regval;
	struct clk_freq_tbl *freq;
	u32 cmd_rcgr_regval;

	/* Is there a pending configuration? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
		return NULL;

	/* Get values of m, n, d, div and src_sel registers. */
	if (has_mnd) {
		m_regval = readl_relaxed(M_REG(rcg));
		n_regval = readl_relaxed(N_REG(rcg));
		d_regval = readl_relaxed(D_REG(rcg));

		/*
		 * The n and d values stored in the frequency tables are sign
		 * extended to 32 bits. The n and d values in the registers are
		 * sign extended to 8 or 16 bits. Sign extend the values read
		 * from the registers so that they can be compared to the
		 * values in the frequency tables.
		 */
		n_regval |= (n_regval >> 8) ? BM(31, 16) : BM(31, 8);
		d_regval |= (d_regval >> 8) ? BM(31, 16) : BM(31, 8);
	}

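	/*
	 * Example of the widening above: a frequency table entry with
	 * M = 1, N = 4 stores n_val = ~(4 - 1) = 0xfffffffc, while an
	 * 8-bit N register reads back as 0xfc. Since 0xfc >> 8 is 0, the
	 * OR with BM(31, 8) turns 0xfc into 0xfffffffc, making the table
	 * comparison below valid.
	 */
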
	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= CFG_RCGR_SRC_SEL_MASK | CFG_RCGR_DIV_MASK
				| MND_MODE_MASK;

	/* If mnd counter is present, check if it's in use. */
	has_mnd = (has_mnd) &&
		((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL);

	/*
	 * Clear out the mn counter mode bits since we now want to compare only
	 * the source mux selection and pre-divider values in the registers.
	 */
	cfg_regval &= ~MND_MODE_MASK;

	/* Figure out what rate the rcg is running at */
	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
		if (freq->div_src_val != cfg_regval)
			continue;
		if (has_mnd) {
			if (freq->m_val != m_regval)
				continue;
			if (freq->n_val != n_regval)
				continue;
			if (freq->d_val != d_regval)
				continue;
		}
		break;
	}

	/* No known frequency found */
	if (freq->freq_hz == FREQ_END)
		return NULL;

	rcg->current_freq = freq;
	return freq->src_clk;
}

static enum handoff _rcg_clk_handoff(struct rcg_clk *rcg)
{
	u32 cmd_rcgr_regval;

	if (rcg->current_freq && rcg->current_freq->freq_hz != FREQ_END)
		rcg->c.rate = rcg->current_freq->freq_hz;

	/* Is the root enabled? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if ((cmd_rcgr_regval & CMD_RCGR_ROOT_STATUS_BIT))
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

static struct clk *rcg_mnd_clk_get_parent(struct clk *c)
{
	return _rcg_clk_get_parent(to_rcg_clk(c), 1);
}

static struct clk *rcg_clk_get_parent(struct clk *c)
{
	return _rcg_clk_get_parent(to_rcg_clk(c), 0);
}

static enum handoff rcg_mnd_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c));
}

static enum handoff rcg_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c));
}

#define BRANCH_CHECK_MASK	BM(31, 28)
#define BRANCH_ON_VAL		BVAL(31, 28, 0x0)
#define BRANCH_OFF_VAL		BVAL(31, 28, 0x8)
#define BRANCH_NOC_FSM_ON_VAL	BVAL(31, 28, 0x2)

/*
 * Branch clock functions
 */
static void branch_clk_halt_check(u32 halt_check, const char *clk_name,
				void __iomem *cbcr_reg,
				enum branch_state br_status)
{
	char *status_str = (br_status == BRANCH_ON) ? "off" : "on";

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers. It's also needed in the udelay() case to ensure
	 * the delay starts after the branch disable.
	 */
	mb();

	if (halt_check == DELAY || halt_check == HALT_VOTED) {
		udelay(HALT_CHECK_DELAY_US);
	} else if (halt_check == HALT) {
		int count;
		u32 val;
		for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
			val = readl_relaxed(cbcr_reg);
			val &= BRANCH_CHECK_MASK;
			switch (br_status) {
			case BRANCH_ON:
				if (val == BRANCH_ON_VAL
					|| val == BRANCH_NOC_FSM_ON_VAL)
					return;
				break;

			case BRANCH_OFF:
				if (val == BRANCH_OFF_VAL)
					return;
				break;
			}
			udelay(1);
		}
		WARN(count == 0, "%s status stuck %s", clk_name, status_str);
	}
}

static int branch_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 cbcr_val;
	struct branch_clk *branch = to_branch_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cbcr_val = readl_relaxed(CBCR_REG(branch));
	cbcr_val |= CBCR_BRANCH_ENABLE_BIT;
	writel_relaxed(cbcr_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to enable before continuing. */
	branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
				CBCR_REG(branch), BRANCH_ON);

	return 0;
}

static void branch_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct branch_clk *branch = to_branch_clk(c);
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(CBCR_REG(branch));
	reg_val &= ~CBCR_BRANCH_ENABLE_BIT;
	writel_relaxed(reg_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to disable before continuing. */
	branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
				CBCR_REG(branch), BRANCH_OFF);
}

static int branch_cdiv_set_rate(struct branch_clk *branch, unsigned long rate)
{
	unsigned long flags;
	u32 regval;

	if (rate > branch->max_div)
		return -EINVAL;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	regval = readl_relaxed(CBCR_REG(branch));
	regval &= ~BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
	regval |= BVAL(CBCR_CDIV_MSB, CBCR_CDIV_LSB, rate);
	writel_relaxed(regval, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

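/*
 * Note that for these divider-only branches the "rate" passed around is
 * the divider value itself, not a frequency in Hz: it is bounded by
 * branch->max_div, programmed into CBCR bits [24:16] above, and read back
 * verbatim into c->rate by branch_clk_handoff() below.
 */
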
static int branch_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return branch_cdiv_set_rate(branch, rate);

	if (!branch->has_sibling)
		return clk_set_rate(c->parent, rate);

	return -EPERM;
}

static long branch_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return rate <= (branch->max_div) ? rate : -EPERM;

	if (!branch->has_sibling)
		return clk_round_rate(c->parent, rate);

	return -EPERM;
}

static unsigned long branch_clk_get_rate(struct clk *c)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return branch->c.rate;

	return clk_get_rate(c->parent);
}

static int branch_clk_list_rate(struct clk *c, unsigned n)
{
	int level, fmax = 0, rate;
	struct branch_clk *branch = to_branch_clk(c);
	struct clk *parent = c->parent;

	if (branch->has_sibling == 1)
		return -ENXIO;

	if (!parent || !parent->ops->list_rate)
		return -ENXIO;

	/* Find max frequency supported within voltage constraints. */
	if (!parent->vdd_class) {
		fmax = INT_MAX;
	} else {
		for (level = 0; level < parent->num_fmax; level++)
			if (parent->fmax[level])
				fmax = parent->fmax[level];
	}

	rate = parent->ops->list_rate(parent, n);
	if (rate <= fmax)
		return rate;
	else
		return -ENXIO;
}

static enum handoff branch_clk_handoff(struct clk *c)
{
	struct branch_clk *branch = to_branch_clk(c);
	u32 cbcr_regval;

	cbcr_regval = readl_relaxed(CBCR_REG(branch));
	if ((cbcr_regval & CBCR_BRANCH_OFF_BIT))
		return HANDOFF_DISABLED_CLK;

	if (branch->max_div) {
		cbcr_regval &= BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
		cbcr_regval >>= CBCR_CDIV_LSB;
		c->rate = cbcr_regval;
	} else if (!branch->has_sibling) {
		c->rate = clk_get_rate(c->parent);
	}

	return HANDOFF_ENABLED_CLK;
}

static int __branch_clk_reset(void __iomem *bcr_reg,
				enum clk_reset_action action)
{
	int ret = 0;
	unsigned long flags;
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(bcr_reg);
	switch (action) {
	case CLK_RESET_ASSERT:
		reg_val |= BCR_BLK_ARES_BIT;
		break;
	case CLK_RESET_DEASSERT:
		reg_val &= ~BCR_BLK_ARES_BIT;
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(reg_val, bcr_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Make sure write is issued before returning. */
	mb();

	return ret;
}

static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (!branch->bcr_reg) {
		WARN(1, "clk_reset called on an unsupported clock (%s)\n",
			c->dbg_name);
		return -EPERM;
	}
	return __branch_clk_reset(BCR_REG(branch), action);
}

static int branch_clk_set_flags(struct clk *c, unsigned flags)
{
	u32 cbcr_val;
	unsigned long irq_flags;
	struct branch_clk *branch = to_branch_clk(c);
	int ret = 0;

	spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
	cbcr_val = readl_relaxed(CBCR_REG(branch));
	switch (flags) {
	case CLKFLAG_RETAIN_PERIPH:
		cbcr_val |= BIT(13);
		break;
	case CLKFLAG_NORETAIN_PERIPH:
		cbcr_val &= ~BIT(13);
		break;
	case CLKFLAG_RETAIN_MEM:
		cbcr_val |= BIT(14);
		break;
	case CLKFLAG_NORETAIN_MEM:
		cbcr_val &= ~BIT(14);
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(cbcr_val, CBCR_REG(branch));
	/*
	 * 8974v2.2 has a requirement that writes to set bits 13 and 14 are
	 * separated by at least 2 bus cycles. Cover one of these cycles by
	 * performing an extra write here. The other cycle is covered by the
	 * read-modify-write design of this function.
	 */
	writel_relaxed(cbcr_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, irq_flags);

	/* Make sure write is issued before returning. */
	mb();

	return ret;
}

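/*
 * Hypothetical caller's view, for illustration: a driver that wants an IP
 * block's memory arrays to retain state while the clock is gated would
 * call clk_set_flags(clk, CLKFLAG_RETAIN_MEM), which sets bit 14 of the
 * branch's CBCR via the switch above.
 */
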
/*
 * Voteable clock functions
 */
static int local_vote_clk_reset(struct clk *c, enum clk_reset_action action)
{
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	if (!vclk->bcr_reg) {
		WARN(1, "clk_reset called on an unsupported clock (%s)\n",
			c->dbg_name);
		return -EPERM;
	}
	return __branch_clk_reset(BCR_REG(vclk), action);
}

static int local_vote_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 ena;
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(VOTE_REG(vclk));
	ena |= vclk->en_mask;
	writel_relaxed(ena, VOTE_REG(vclk));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	branch_clk_halt_check(vclk->halt_check, c->dbg_name, CBCR_REG(vclk),
				BRANCH_ON);

	return 0;
}

static void local_vote_clk_disable(struct clk *c)
{
	unsigned long flags;
	u32 ena;
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(VOTE_REG(vclk));
	ena &= ~vclk->en_mask;
	writel_relaxed(ena, VOTE_REG(vclk));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static enum handoff local_vote_clk_handoff(struct clk *c)
{
	struct local_vote_clk *vclk = to_local_vote_clk(c);
	u32 vote_regval;

	/* Is the branch voted on by apps? */
	vote_regval = readl_relaxed(VOTE_REG(vclk));
	if (!(vote_regval & vclk->en_mask))
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

enum handoff byte_rcg_handoff(struct clk *clk)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	u32 div_val;
	unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);

	/* If the pre-divider is used, find the rate after the division */
	div_val = readl_relaxed(CFG_RCGR_REG(rcg)) & CFG_RCGR_DIV_MASK;
	if (div_val > 1)
		pre_div_rate = parent_rate / ((div_val + 1) >> 1);
	else
		pre_div_rate = parent_rate;

	clk->rate = pre_div_rate;

	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

static int set_rate_byte(struct clk *clk, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	struct clk *pll = clk->parent;
	unsigned long source_rate, div;
	struct clk_freq_tbl *byte_freq = rcg->current_freq;
	int rc;

	if (rate == 0)
		return -EINVAL;

	rc = clk_set_rate(pll, rate);
	if (rc)
		return rc;

	source_rate = clk_round_rate(pll, rate);
	if ((2 * source_rate) % rate)
		return -EINVAL;

	div = ((2 * source_rate) / rate) - 1;
	if (div > CFG_RCGR_DIV_MASK)
		return -EINVAL;

	byte_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
	byte_freq->div_src_val |= BVAL(4, 0, div);
	set_rate_hid(rcg, byte_freq);

	return 0;
}

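/*
 * Worked example of the divider math above (rates are hypothetical): if
 * the PLL rounds a 125 MHz request to 250 MHz, then
 * div = (2 * 250 / 125) - 1 = 3, which in the half-integer encoding is a
 * divide-by-2, giving exactly 125 MHz. Requests where 2 * source_rate is
 * not an integer multiple of the target are rejected with -EINVAL.
 */
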
enum handoff pixel_rcg_handoff(struct clk *clk)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	u32 div_val, mval, nval, cfg_regval;
	unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));

	/* If the pre-divider is used, find the rate after the division */
	div_val = cfg_regval & CFG_RCGR_DIV_MASK;
	if (div_val > 1)
		pre_div_rate = parent_rate / ((div_val + 1) >> 1);
	else
		pre_div_rate = parent_rate;

	clk->rate = pre_div_rate;

	/* If MND is used, find the rate after the MND division */
	if ((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL) {
		mval = readl_relaxed(M_REG(rcg));
		nval = readl_relaxed(N_REG(rcg));
		if (!nval)
			return HANDOFF_DISABLED_CLK;
		nval = (~nval) + mval;
		clk->rate = (pre_div_rate * mval) / nval;
	}

	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

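/*
 * Example of the N recovery above, assuming the register holds the full
 * 32-bit ~(N - M) value that set_rate_mnd() wrote: for M = 1, N = 4 the
 * N register reads back 0xfffffffc, so nval = ~0xfffffffc + 1 = 4 and the
 * handed-off rate is pre_div_rate * 1 / 4.
 */
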
static int set_rate_pixel(struct clk *clk, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	struct clk *pll = clk->parent;
	unsigned long source_rate, div;
	struct clk_freq_tbl *pixel_freq = rcg->current_freq;
	int rc;

	if (rate == 0)
		return -EINVAL;

	rc = clk_set_rate(pll, rate);
	if (rc)
		return rc;

	source_rate = clk_round_rate(pll, rate);
	if ((2 * source_rate) % rate)
		return -EINVAL;

	div = ((2 * source_rate) / rate) - 1;
	if (div > CFG_RCGR_DIV_MASK)
		return -EINVAL;

	pixel_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
	pixel_freq->div_src_val |= BVAL(4, 0, div);
	set_rate_mnd(rcg, pixel_freq);

	return 0;
}

/*
 * Unlike other clocks, the HDMI rate is adjusted through PLL
 * re-programming. It is also routed through an HID divider.
 */
static int rcg_clk_set_rate_hdmi(struct clk *c, unsigned long rate)
{
	struct clk_freq_tbl *nf;
	struct rcg_clk *rcg = to_rcg_clk(c);
	int rc;

	for (nf = rcg->freq_tbl; nf->freq_hz != rate; nf++)
		if (nf->freq_hz == FREQ_END) {
			rc = -EINVAL;
			goto out;
		}

	rc = clk_set_rate(nf->src_clk, rate);
	if (rc < 0)
		goto out;
	set_rate_hid(rcg, nf);

	rcg->current_freq = nf;
	c->parent = nf->src_clk;
out:
	return rc;
}

#define ENABLE_REG(x)	(*(x)->base + (x)->enable_reg)
#define SELECT_REG(x)	(*(x)->base + (x)->select_reg)

/*
 * mux clock functions
 */
static void cam_mux_clk_halt_check(void)
{
	/* Ensure that the delay starts after the mux disable/enable. */
	mb();
	udelay(HALT_CHECK_DELAY_US);
}

static int cam_mux_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 regval;
	struct cam_mux_clk *mux = to_cam_mux_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	regval = readl_relaxed(ENABLE_REG(mux));
	regval |= mux->enable_mask;
	writel_relaxed(regval, ENABLE_REG(mux));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to enable before continuing. */
	cam_mux_clk_halt_check();

	return 0;
}

static void cam_mux_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	u32 regval;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	regval = readl_relaxed(ENABLE_REG(mux));
	regval &= ~mux->enable_mask;
	writel_relaxed(regval, ENABLE_REG(mux));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to disable before continuing. */
	cam_mux_clk_halt_check();
}

static int mux_source_switch(struct cam_mux_clk *mux, struct mux_source *dest)
{
	unsigned long flags;
	u32 regval;
	int ret = 0;

	ret = __clk_pre_reparent(&mux->c, dest->clk, &flags);
	if (ret)
		goto out;

	regval = readl_relaxed(SELECT_REG(mux));
	regval &= ~mux->select_mask;
	regval |= dest->select_val;
	writel_relaxed(regval, SELECT_REG(mux));

	/* Make sure switch request goes through before proceeding. */
	mb();

	__clk_post_reparent(&mux->c, mux->c.parent, &flags);
out:
	return ret;
}

static int cam_mux_clk_set_parent(struct clk *c, struct clk *parent)
{
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	struct mux_source *dest = NULL;
	int ret;

	if (!mux->sources || !parent)
		return -EPERM;

	dest = mux->sources;

	while (dest->clk) {
		if (dest->clk == parent)
			break;
		dest++;
	}

	if (!dest->clk)
		return -EPERM;

	ret = mux_source_switch(mux, dest);
	if (ret)
		return ret;

	mux->c.rate = clk_get_rate(dest->clk);

	return 0;
}

static enum handoff cam_mux_clk_handoff(struct clk *c)
{
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	u32 mask = mux->enable_mask;
	u32 regval = readl_relaxed(ENABLE_REG(mux));

	c->rate = clk_get_rate(c->parent);

	if (mask == (regval & mask))
		return HANDOFF_ENABLED_CLK;

	return HANDOFF_DISABLED_CLK;
}

static struct clk *cam_mux_clk_get_parent(struct clk *c)
{
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	struct mux_source *parent = NULL;
	u32 regval = readl_relaxed(SELECT_REG(mux));

	if (!mux->sources)
		return ERR_PTR(-EPERM);

	parent = mux->sources;

	while (parent->clk) {
		if ((regval & mux->select_mask) == parent->select_val)
			return parent->clk;

		parent++;
	}

	return ERR_PTR(-EPERM);
}

static int cam_mux_clk_list_rate(struct clk *c, unsigned n)
{
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	int i;

	for (i = 0; i < n; i++)
		if (!mux->sources[i].clk)
			break;

	if (!mux->sources[i].clk)
		return -ENXIO;

	return clk_get_rate(mux->sources[i].clk);
}

struct clk_ops clk_ops_empty;

struct clk_ops clk_ops_rcg = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_clk_handoff,
	.get_parent = rcg_clk_get_parent,
};

struct clk_ops clk_ops_rcg_mnd = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_mnd_clk_handoff,
	.get_parent = rcg_mnd_clk_get_parent,
};

struct clk_ops clk_ops_pixel = {
	.enable = rcg_clk_prepare,
	.set_rate = set_rate_pixel,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = pixel_rcg_handoff,
};

struct clk_ops clk_ops_byte = {
	.enable = rcg_clk_prepare,
	.set_rate = set_rate_byte,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = byte_rcg_handoff,
};

struct clk_ops clk_ops_rcg_hdmi = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate_hdmi,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_clk_handoff,
	.get_parent = rcg_clk_get_parent,
};

struct clk_ops clk_ops_branch = {
	.enable = branch_clk_enable,
	.disable = branch_clk_disable,
	.set_rate = branch_clk_set_rate,
	.get_rate = branch_clk_get_rate,
	.list_rate = branch_clk_list_rate,
	.round_rate = branch_clk_round_rate,
	.reset = branch_clk_reset,
	.set_flags = branch_clk_set_flags,
	.handoff = branch_clk_handoff,
};

struct clk_ops clk_ops_vote = {
	.enable = local_vote_clk_enable,
	.disable = local_vote_clk_disable,
	.reset = local_vote_clk_reset,
	.handoff = local_vote_clk_handoff,
};

struct clk_ops clk_ops_cam_mux = {
	.enable = cam_mux_clk_enable,
	.disable = cam_mux_clk_disable,
	.set_parent = cam_mux_clk_set_parent,
	.get_parent = cam_mux_clk_get_parent,
	.handoff = cam_mux_clk_handoff,
	.list_rate = cam_mux_clk_list_rate,
};