/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <mach/clk.h>
#include <mach/clk-provider.h>

#include "clock-local2.h"

/*
 * When enabling/disabling a clock, check the halt bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define HALT_CHECK_MAX_LOOPS	500
/* For clocks without halt checking, wait this long after enables/disables. */
#define HALT_CHECK_DELAY_US	10

/*
 * When updating an RCG configuration, check the update bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define UPDATE_CHECK_MAX_LOOPS	500

DEFINE_SPINLOCK(local_clock_reg_lock);
struct clk_freq_tbl rcg_dummy_freq = F_END;

#define CMD_RCGR_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg)
#define CFG_RCGR_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x4)
#define M_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x8)
#define N_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0xC)
#define D_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x10)
#define CBCR_REG(x)		(*(x)->base + (x)->cbcr_reg)
#define BCR_REG(x)		(*(x)->base + (x)->bcr_reg)
#define VOTE_REG(x)		(*(x)->base + (x)->vote_reg)
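
/*
 * Note on the accessors above: the RCG register block is contiguous, with
 * CMD at +0x0, CFG at +0x4 and the M, N and D counters at +0x8, +0xC and
 * +0x10, which is why all five are derived from the single cmd_rcgr_reg
 * offset.
 */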

/*
 * Important clock bit positions and masks
 */
#define CMD_RCGR_ROOT_ENABLE_BIT	BIT(1)
#define CBCR_BRANCH_ENABLE_BIT		BIT(0)
#define CBCR_BRANCH_OFF_BIT		BIT(31)
#define CMD_RCGR_CONFIG_UPDATE_BIT	BIT(0)
#define CMD_RCGR_ROOT_STATUS_BIT	BIT(31)
#define BCR_BLK_ARES_BIT		BIT(0)
#define CBCR_HW_CTL_BIT			BIT(1)
#define CFG_RCGR_DIV_MASK		BM(4, 0)
#define CFG_RCGR_SRC_SEL_MASK		BM(10, 8)
#define MND_MODE_MASK			BM(13, 12)
#define MND_DUAL_EDGE_MODE_BVAL		BVAL(13, 12, 0x2)
#define CMD_RCGR_CONFIG_DIRTY_MASK	BM(7, 4)
#define CBCR_CDIV_LSB			16
#define CBCR_CDIV_MSB			24
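
/*
 * A quick illustration of the BM()/BVAL() helpers used above (provided by
 * the clock framework headers): BM(msb, lsb) builds a mask covering bits
 * [msb:lsb], so BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB) masks bits [24:16] of the
 * CBCR, and BVAL(24, 16, 3) places a divider value of 3 in that field,
 * i.e. (3 << 16) masked to the field width.
 */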

enum branch_state {
	BRANCH_ON,
	BRANCH_OFF,
};

/*
 * RCG functions
 */

/*
 * Update an RCG with a new configuration. This may include a new M, N, or D
 * value, source selection or pre-divider value.
 */
static void rcg_update_config(struct rcg_clk *rcg)
{
	u32 cmd_rcgr_regval, count;

	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	cmd_rcgr_regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));

	/* Wait for update to take effect */
	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
		if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
				CMD_RCGR_CONFIG_UPDATE_BIT))
			return;
		udelay(1);
	}

	WARN(count == 0, "%s: rcg didn't update its configuration.",
		rcg->c.dbg_name);
}

/* RCG set rate function for clocks with Half Integer Dividers. */
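/*
 * A worked example of the half-integer encoding, inferred from the div
 * computation in set_rate_byte() and set_rate_pixel() below: the CFG
 * divider field stores (2 * divisor) - 1, so divide-by-1 is 0x1,
 * divide-by-1.5 is 0x2 and divide-by-4.5 is 0x8.
 */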
void set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	u32 cfg_regval;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
	cfg_regval |= nf->div_src_val;
	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));

	rcg_update_config(rcg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

/* RCG set rate function for clocks with MND & Half Integer Dividers. */
void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	u32 cfg_regval;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	writel_relaxed(nf->m_val, M_REG(rcg));
	writel_relaxed(nf->n_val, N_REG(rcg));
	writel_relaxed(nf->d_val, D_REG(rcg));

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
	cfg_regval |= nf->div_src_val;

	/* Activate or disable the M/N:D divider as necessary */
	cfg_regval &= ~MND_MODE_MASK;
	if (nf->n_val != 0)
		cfg_regval |= MND_DUAL_EDGE_MODE_BVAL;
	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));

	rcg_update_config(rcg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static int rcg_clk_prepare(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	WARN(rcg->current_freq == &rcg_dummy_freq,
		"Attempting to prepare %s before setting its rate. "
		"Set the rate first!\n", rcg->c.dbg_name);

	return 0;
}

static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct clk_freq_tbl *cf, *nf;
	struct rcg_clk *rcg = to_rcg_clk(c);
	int rc;
	unsigned long flags;

	for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	cf = rcg->current_freq;

	rc = __clk_pre_reparent(c, nf->src_clk, &flags);
	if (rc)
		return rc;

	BUG_ON(!rcg->set_rate);

	/* Perform clock-specific frequency switch operations. */
	rcg->set_rate(rcg, nf);
	rcg->current_freq = nf;
	c->parent = nf->src_clk;

	__clk_post_reparent(c, cf->src_clk, &flags);

	return 0;
}

/*
 * Return a supported rate that's at least the specified rate or
 * the max supported rate if the specified rate is larger than the
 * max supported rate.
 */
static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk_freq_tbl *f;

	for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
		if (f->freq_hz >= rate)
			return f->freq_hz;

	f--;
	return f->freq_hz;
}

/* Return the nth supported frequency for a given clock. */
static int rcg_clk_list_rate(struct clk *c, unsigned n)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
		return -ENXIO;

	return (rcg->freq_tbl + n)->freq_hz;
}

static struct clk *_rcg_clk_get_parent(struct rcg_clk *rcg, int has_mnd)
{
	u32 n_regval = 0, m_regval = 0, d_regval = 0;
	u32 cfg_regval;
	struct clk_freq_tbl *freq;
	u32 cmd_rcgr_regval;

	/* Is there a pending configuration? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
		return NULL;

	/* Get values of m, n, d, div and src_sel registers. */
	if (has_mnd) {
		m_regval = readl_relaxed(M_REG(rcg));
		n_regval = readl_relaxed(N_REG(rcg));
		d_regval = readl_relaxed(D_REG(rcg));

		/*
		 * The n and d values stored in the frequency tables are sign
		 * extended to 32 bits. The n and d values in the registers are
		 * sign extended to 8 or 16 bits. Sign extend the values read
		 * from the registers so that they can be compared to the
		 * values in the frequency tables.
		 */
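		/*
		 * For example, an 8-bit register value of 0xF5 widens to
		 * 0xFFFFFFF5, and a 16-bit value of 0xFFF5 widens to the
		 * same 0xFFFFFFF5.
		 */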
		n_regval |= (n_regval >> 8) ? BM(31, 16) : BM(31, 8);
		d_regval |= (d_regval >> 8) ? BM(31, 16) : BM(31, 8);
	}

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= CFG_RCGR_SRC_SEL_MASK | CFG_RCGR_DIV_MASK
				| MND_MODE_MASK;

	/* If mnd counter is present, check if it's in use. */
	has_mnd = (has_mnd) &&
		((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL);

	/*
	 * Clear out the mn counter mode bits since we now want to compare only
	 * the source mux selection and pre-divider values in the registers.
	 */
	cfg_regval &= ~MND_MODE_MASK;

	/* Figure out what rate the rcg is running at */
	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
		if (freq->div_src_val != cfg_regval)
			continue;
		if (has_mnd) {
			if (freq->m_val != m_regval)
				continue;
			if (freq->n_val != n_regval)
				continue;
			if (freq->d_val != d_regval)
				continue;
		}
		break;
	}

	/* No known frequency found */
	if (freq->freq_hz == FREQ_END)
		return NULL;

	rcg->current_freq = freq;
	return freq->src_clk;
}

static enum handoff _rcg_clk_handoff(struct rcg_clk *rcg)
{
	u32 cmd_rcgr_regval;

	if (rcg->current_freq && rcg->current_freq->freq_hz != FREQ_END)
		rcg->c.rate = rcg->current_freq->freq_hz;

	/* Is the root enabled? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if ((cmd_rcgr_regval & CMD_RCGR_ROOT_STATUS_BIT))
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

static struct clk *rcg_mnd_clk_get_parent(struct clk *c)
{
	return _rcg_clk_get_parent(to_rcg_clk(c), 1);
}

static struct clk *rcg_clk_get_parent(struct clk *c)
{
	return _rcg_clk_get_parent(to_rcg_clk(c), 0);
}

static enum handoff rcg_mnd_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c));
}

static enum handoff rcg_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c));
}

#define BRANCH_CHECK_MASK	BM(31, 28)
#define BRANCH_ON_VAL		BVAL(31, 28, 0x0)
#define BRANCH_OFF_VAL		BVAL(31, 28, 0x8)
#define BRANCH_NOC_FSM_ON_VAL	BVAL(31, 28, 0x2)

/*
 * Branch clock functions
 */
static void branch_clk_halt_check(u32 halt_check, const char *clk_name,
				void __iomem *cbcr_reg,
				enum branch_state br_status)
{
	char *status_str = (br_status == BRANCH_ON) ? "off" : "on";

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers. It's also needed in the udelay() case to ensure
	 * the delay starts after the branch disable.
	 */
	mb();

	if (halt_check == DELAY || halt_check == HALT_VOTED) {
		udelay(HALT_CHECK_DELAY_US);
	} else if (halt_check == HALT) {
		int count;
		u32 val;
		for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
			val = readl_relaxed(cbcr_reg);
			val &= BRANCH_CHECK_MASK;
			switch (br_status) {
			case BRANCH_ON:
				if (val == BRANCH_ON_VAL
						|| val == BRANCH_NOC_FSM_ON_VAL)
					return;
				break;

			case BRANCH_OFF:
				if (val == BRANCH_OFF_VAL)
					return;
				break;
			}
			udelay(1);
		}
		WARN(count == 0, "%s status stuck %s", clk_name, status_str);
	}
}

static int branch_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 cbcr_val;
	struct branch_clk *branch = to_branch_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cbcr_val = readl_relaxed(CBCR_REG(branch));
	cbcr_val |= CBCR_BRANCH_ENABLE_BIT;
	writel_relaxed(cbcr_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to enable before continuing. */
	branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
				CBCR_REG(branch), BRANCH_ON);

	return 0;
}

static void branch_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct branch_clk *branch = to_branch_clk(c);
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(CBCR_REG(branch));
	reg_val &= ~CBCR_BRANCH_ENABLE_BIT;
	writel_relaxed(reg_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to disable before continuing. */
	branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
				CBCR_REG(branch), BRANCH_OFF);
}

/*
 * For branch clocks with a local divider, the "rate" passed in is the
 * divider value itself rather than a frequency in Hz.
 */
static int branch_cdiv_set_rate(struct branch_clk *branch, unsigned long rate)
{
	unsigned long flags;
	u32 regval;

	if (rate > branch->max_div)
		return -EINVAL;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	regval = readl_relaxed(CBCR_REG(branch));
	regval &= ~BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
	regval |= BVAL(CBCR_CDIV_MSB, CBCR_CDIV_LSB, rate);
	writel_relaxed(regval, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

static int branch_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return branch_cdiv_set_rate(branch, rate);

	if (!branch->has_sibling)
		return clk_set_rate(c->parent, rate);

	return -EPERM;
}

static long branch_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return rate <= (branch->max_div) ? rate : -EPERM;

	if (!branch->has_sibling)
		return clk_round_rate(c->parent, rate);

	return -EPERM;
}

static unsigned long branch_clk_get_rate(struct clk *c)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return branch->c.rate;

	return clk_get_rate(c->parent);
}

static int branch_clk_list_rate(struct clk *c, unsigned n)
{
	int level, fmax = 0, rate;
	struct branch_clk *branch = to_branch_clk(c);
	struct clk *parent = c->parent;

	if (branch->has_sibling == 1)
		return -ENXIO;

	if (!parent || !parent->ops->list_rate)
		return -ENXIO;

	/* Find max frequency supported within voltage constraints. */
	if (!parent->vdd_class) {
		fmax = INT_MAX;
	} else {
		for (level = 0; level < parent->num_fmax; level++)
			if (parent->fmax[level])
				fmax = parent->fmax[level];
	}

	rate = parent->ops->list_rate(parent, n);
	if (rate <= fmax)
		return rate;
	else
		return -ENXIO;
}

static enum handoff branch_clk_handoff(struct clk *c)
{
	struct branch_clk *branch = to_branch_clk(c);
	u32 cbcr_regval;

	cbcr_regval = readl_relaxed(CBCR_REG(branch));
	if ((cbcr_regval & CBCR_BRANCH_OFF_BIT))
		return HANDOFF_DISABLED_CLK;

	if (branch->max_div) {
		cbcr_regval &= BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
		cbcr_regval >>= CBCR_CDIV_LSB;
		c->rate = cbcr_regval;
	} else if (!branch->has_sibling) {
		c->rate = clk_get_rate(c->parent);
	}

	return HANDOFF_ENABLED_CLK;
}

static int __branch_clk_reset(void __iomem *bcr_reg,
				enum clk_reset_action action)
{
	int ret = 0;
	unsigned long flags;
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(bcr_reg);
	switch (action) {
	case CLK_RESET_ASSERT:
		reg_val |= BCR_BLK_ARES_BIT;
		break;
	case CLK_RESET_DEASSERT:
		reg_val &= ~BCR_BLK_ARES_BIT;
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(reg_val, bcr_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Make sure write is issued before returning. */
	mb();

	return ret;
}

static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (!branch->bcr_reg)
		return -EPERM;
	return __branch_clk_reset(BCR_REG(branch), action);
}

static int branch_clk_set_flags(struct clk *c, unsigned flags)
{
	u32 cbcr_val;
	unsigned long irq_flags;
	struct branch_clk *branch = to_branch_clk(c);
	int ret = 0;

	spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
	cbcr_val = readl_relaxed(CBCR_REG(branch));
	switch (flags) {
	case CLKFLAG_RETAIN_PERIPH:
		cbcr_val |= BIT(13);
		break;
	case CLKFLAG_NORETAIN_PERIPH:
		cbcr_val &= ~BIT(13);
		break;
	case CLKFLAG_RETAIN_MEM:
		cbcr_val |= BIT(14);
		break;
	case CLKFLAG_NORETAIN_MEM:
		cbcr_val &= ~BIT(14);
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(cbcr_val, CBCR_REG(branch));
	/*
	 * 8974v2.2 has a requirement that writes to set bits 13 and 14 are
	 * separated by at least 2 bus cycles. Cover one of these cycles by
	 * performing an extra write here. The other cycle is covered by the
	 * read-modify-write design of this function.
	 */
	writel_relaxed(cbcr_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, irq_flags);

	/* Make sure write is issued before returning. */
	mb();

	return ret;
}

/*
 * Voteable clock functions
 */
static int local_vote_clk_reset(struct clk *c, enum clk_reset_action action)
{
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	if (!vclk->bcr_reg) {
		WARN(1, "clk_reset called on an unsupported clock (%s)\n",
			c->dbg_name);
		return -EPERM;
	}
	return __branch_clk_reset(BCR_REG(vclk), action);
}

static int local_vote_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 ena;
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(VOTE_REG(vclk));
	ena |= vclk->en_mask;
	writel_relaxed(ena, VOTE_REG(vclk));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	branch_clk_halt_check(vclk->halt_check, c->dbg_name, CBCR_REG(vclk),
				BRANCH_ON);

	return 0;
}

static void local_vote_clk_disable(struct clk *c)
{
	unsigned long flags;
	u32 ena;
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(VOTE_REG(vclk));
	ena &= ~vclk->en_mask;
	writel_relaxed(ena, VOTE_REG(vclk));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static enum handoff local_vote_clk_handoff(struct clk *c)
{
	struct local_vote_clk *vclk = to_local_vote_clk(c);
	u32 vote_regval;

	/* Is the branch voted on by apps? */
	vote_regval = readl_relaxed(VOTE_REG(vclk));
	if (!(vote_regval & vclk->en_mask))
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

enum handoff byte_rcg_handoff(struct clk *clk)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	u32 div_val;
	unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);

	/* If the pre-divider is used, find the rate after the division */
	div_val = readl_relaxed(CFG_RCGR_REG(rcg)) & CFG_RCGR_DIV_MASK;
	if (div_val > 1)
		pre_div_rate = parent_rate / ((div_val + 1) >> 1);
	else
		pre_div_rate = parent_rate;
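	/*
	 * E.g. a CFG divider field of 3 encodes divide-by-2, giving
	 * pre_div_rate = parent_rate / ((3 + 1) >> 1) = parent_rate / 2.
	 */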

	clk->rate = pre_div_rate;

	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

static int set_rate_byte(struct clk *clk, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	struct clk *pll = clk->parent;
	unsigned long source_rate, div;
	struct clk_freq_tbl *byte_freq = rcg->current_freq;
	int rc;

	if (rate == 0)
		return -EINVAL;

	rc = clk_set_rate(pll, rate);
	if (rc)
		return rc;

	source_rate = clk_round_rate(pll, rate);
	if ((2 * source_rate) % rate)
		return -EINVAL;

	div = ((2 * source_rate) / rate) - 1;
	if (div > CFG_RCGR_DIV_MASK)
		return -EINVAL;

	byte_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
	byte_freq->div_src_val |= BVAL(4, 0, div);
	set_rate_hid(rcg, byte_freq);

	return 0;
}
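
/*
 * A worked example of the math above: if the PLL settles at 1500 MHz and
 * the requested byte clock is 750 MHz, div = (2 * 1500 / 750) - 1 = 3,
 * which is the half-integer encoding for divide-by-2.
 */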

enum handoff pixel_rcg_handoff(struct clk *clk)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	u32 div_val, mval, nval, cfg_regval;
	unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));

	/* If the pre-divider is used, find the rate after the division */
	div_val = cfg_regval & CFG_RCGR_DIV_MASK;
	if (div_val > 1)
		pre_div_rate = parent_rate / ((div_val + 1) >> 1);
	else
		pre_div_rate = parent_rate;

	clk->rate = pre_div_rate;

	/* If MND is used, find the rate after the MND division */
	if ((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL) {
		mval = readl_relaxed(M_REG(rcg));
		nval = readl_relaxed(N_REG(rcg));
		if (!nval)
			return HANDOFF_DISABLED_CLK;
		nval = (~nval) + mval;
		clk->rate = (pre_div_rate * mval) / nval;
	}
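	/*
	 * A worked example, assuming the N register reads back the full
	 * sign-extended value written by set_rate_mnd(): for M = 1, N = 4
	 * the register holds ~(N - M) = 0xFFFFFFFC, so nval becomes
	 * ~0xFFFFFFFC + 1 = 4 and the rate is pre_div_rate / 4.
	 */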

	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

static int set_rate_pixel(struct clk *clk, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	struct clk *pll = clk->parent;
	unsigned long source_rate, div;
	struct clk_freq_tbl *pixel_freq = rcg->current_freq;
	int rc;

	if (rate == 0)
		return -EINVAL;

	rc = clk_set_rate(pll, rate);
	if (rc)
		return rc;

	source_rate = clk_round_rate(pll, rate);
	if ((2 * source_rate) % rate)
		return -EINVAL;

	div = ((2 * source_rate) / rate) - 1;
	if (div > CFG_RCGR_DIV_MASK)
		return -EINVAL;

	pixel_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
	pixel_freq->div_src_val |= BVAL(4, 0, div);
	set_rate_mnd(rcg, pixel_freq);

	return 0;
}

/*
 * Unlike other clocks, the HDMI rate is adjusted through PLL
 * re-programming. It is also routed through an HID divider.
 */
static int rcg_clk_set_rate_hdmi(struct clk *c, unsigned long rate)
{
	struct clk_freq_tbl *nf;
	struct rcg_clk *rcg = to_rcg_clk(c);
	int rc;

	for (nf = rcg->freq_tbl; nf->freq_hz != rate; nf++)
		if (nf->freq_hz == FREQ_END) {
			rc = -EINVAL;
			goto out;
		}

	rc = clk_set_rate(nf->src_clk, rate);
	if (rc < 0)
		goto out;
	set_rate_hid(rcg, nf);

	rcg->current_freq = nf;
	c->parent = nf->src_clk;
out:
	return rc;
}

#define ENABLE_REG(x)	(*(x)->base + (x)->enable_reg)
#define SELECT_REG(x)	(*(x)->base + (x)->select_reg)

/*
 * mux clock functions
 */
static void cam_mux_clk_halt_check(void)
{
	/* Ensure that the delay starts after the mux disable/enable. */
	mb();
	udelay(HALT_CHECK_DELAY_US);
}

static int cam_mux_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 regval;
	struct cam_mux_clk *mux = to_cam_mux_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	regval = readl_relaxed(ENABLE_REG(mux));
	regval |= mux->enable_mask;
	writel_relaxed(regval, ENABLE_REG(mux));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to enable before continuing. */
	cam_mux_clk_halt_check();

	return 0;
}

static void cam_mux_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	u32 regval;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	regval = readl_relaxed(ENABLE_REG(mux));
	regval &= ~mux->enable_mask;
	writel_relaxed(regval, ENABLE_REG(mux));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to disable before continuing. */
	cam_mux_clk_halt_check();
}

static int mux_source_switch(struct cam_mux_clk *mux, struct mux_source *dest)
{
	unsigned long flags;
	u32 regval;
	int ret = 0;

	ret = __clk_pre_reparent(&mux->c, dest->clk, &flags);
	if (ret)
		goto out;

	regval = readl_relaxed(SELECT_REG(mux));
	regval &= ~mux->select_mask;
	regval |= dest->select_val;
	writel_relaxed(regval, SELECT_REG(mux));

	/* Make sure switch request goes through before proceeding. */
	mb();

	__clk_post_reparent(&mux->c, mux->c.parent, &flags);
out:
	return ret;
}

static int cam_mux_clk_set_parent(struct clk *c, struct clk *parent)
{
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	struct mux_source *dest = NULL;
	int ret;

	if (!mux->sources || !parent)
		return -EPERM;

	dest = mux->sources;

	while (dest->clk) {
		if (dest->clk == parent)
			break;
		dest++;
	}

	if (!dest->clk)
		return -EPERM;

	ret = mux_source_switch(mux, dest);
	if (ret)
		return ret;

	mux->c.rate = clk_get_rate(dest->clk);

	return 0;
}

static enum handoff cam_mux_clk_handoff(struct clk *c)
{
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	u32 mask = mux->enable_mask;
	u32 regval = readl_relaxed(ENABLE_REG(mux));

	c->rate = clk_get_rate(c->parent);

	if (mask == (regval & mask))
		return HANDOFF_ENABLED_CLK;

	return HANDOFF_DISABLED_CLK;
}

static struct clk *cam_mux_clk_get_parent(struct clk *c)
{
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	struct mux_source *parent = NULL;
	u32 regval = readl_relaxed(SELECT_REG(mux));

	if (!mux->sources)
		return ERR_PTR(-EPERM);

	parent = mux->sources;

	while (parent->clk) {
		if ((regval & mux->select_mask) == parent->select_val)
			return parent->clk;

		parent++;
	}

	return ERR_PTR(-EPERM);
}

static int cam_mux_clk_list_rate(struct clk *c, unsigned n)
{
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	int i;

	for (i = 0; i < n; i++)
		if (!mux->sources[i].clk)
			break;

	if (!mux->sources[i].clk)
		return -ENXIO;

	return clk_get_rate(mux->sources[i].clk);
}

struct clk_ops clk_ops_empty;

struct clk_ops clk_ops_rcg = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_clk_handoff,
	.get_parent = rcg_clk_get_parent,
};

struct clk_ops clk_ops_rcg_mnd = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_mnd_clk_handoff,
	.get_parent = rcg_mnd_clk_get_parent,
};

struct clk_ops clk_ops_pixel = {
	.enable = rcg_clk_prepare,
	.set_rate = set_rate_pixel,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = pixel_rcg_handoff,
};

struct clk_ops clk_ops_byte = {
	.enable = rcg_clk_prepare,
	.set_rate = set_rate_byte,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = byte_rcg_handoff,
};

struct clk_ops clk_ops_rcg_hdmi = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate_hdmi,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_clk_handoff,
	.get_parent = rcg_clk_get_parent,
};

struct clk_ops clk_ops_branch = {
	.enable = branch_clk_enable,
	.disable = branch_clk_disable,
	.set_rate = branch_clk_set_rate,
	.get_rate = branch_clk_get_rate,
	.list_rate = branch_clk_list_rate,
	.round_rate = branch_clk_round_rate,
	.reset = branch_clk_reset,
	.set_flags = branch_clk_set_flags,
	.handoff = branch_clk_handoff,
};

struct clk_ops clk_ops_vote = {
	.enable = local_vote_clk_enable,
	.disable = local_vote_clk_disable,
	.reset = local_vote_clk_reset,
	.handoff = local_vote_clk_handoff,
};

struct clk_ops clk_ops_cam_mux = {
	.enable = cam_mux_clk_enable,
	.disable = cam_mux_clk_disable,
	.set_parent = cam_mux_clk_set_parent,
	.get_parent = cam_mux_clk_get_parent,
	.handoff = cam_mux_clk_handoff,
	.list_rate = cam_mux_clk_list_rate,
};