/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <mach/clk.h>
#include <mach/clk-provider.h>

#include "clock-local2.h"

/*
 * When enabling/disabling a clock, check the halt bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define HALT_CHECK_MAX_LOOPS	500
/* For clocks without halt checking, wait this long after enables/disables. */
#define HALT_CHECK_DELAY_US	10

/*
 * When updating an RCG configuration, check the update bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define UPDATE_CHECK_MAX_LOOPS	500

DEFINE_SPINLOCK(local_clock_reg_lock);
struct clk_freq_tbl rcg_dummy_freq = F_END;

#define CMD_RCGR_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg)
#define CFG_RCGR_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x4)
#define M_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x8)
#define N_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0xC)
#define D_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x10)
#define CBCR_REG(x)		(*(x)->base + (x)->cbcr_reg)
#define BCR_REG(x)		(*(x)->base + (x)->bcr_reg)
#define VOTE_REG(x)		(*(x)->base + (x)->vote_reg)

/*
 * Important clock bit positions and masks
 */
#define CMD_RCGR_ROOT_ENABLE_BIT	BIT(1)
#define CBCR_BRANCH_ENABLE_BIT		BIT(0)
#define CBCR_BRANCH_OFF_BIT		BIT(31)
#define CMD_RCGR_CONFIG_UPDATE_BIT	BIT(0)
#define CMD_RCGR_ROOT_STATUS_BIT	BIT(31)
#define BCR_BLK_ARES_BIT		BIT(0)
#define CBCR_HW_CTL_BIT			BIT(1)
#define CFG_RCGR_DIV_MASK		BM(4, 0)
#define CFG_RCGR_SRC_SEL_MASK		BM(10, 8)
#define MND_MODE_MASK			BM(13, 12)
#define MND_DUAL_EDGE_MODE_BVAL		BVAL(13, 12, 0x2)
#define CMD_RCGR_CONFIG_DIRTY_MASK	BM(7, 4)
#define CBCR_CDIV_LSB			16
#define CBCR_CDIV_MSB			24

enum branch_state {
	BRANCH_ON,
	BRANCH_OFF,
};

/*
 * RCG functions
 */

/*
 * Update an RCG with a new configuration. This may include a new M, N, or D
 * value, source selection or pre-divider value.
 */
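/*
 * A note on the handshake, as reflected in the code below and in
 * _rcg_clk_get_parent(): software writes CFG/M/N/D and then sets
 * CMD_RCGR_CONFIG_UPDATE_BIT; hardware clears that bit once the new
 * configuration has been latched. While a written configuration has not
 * yet been latched, the CMD_RCGR_CONFIG_DIRTY_MASK field reads nonzero.
 */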
static void rcg_update_config(struct rcg_clk *rcg)
{
	u32 cmd_rcgr_regval, count;

	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	cmd_rcgr_regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));

	/* Wait for update to take effect */
	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
		if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
				CMD_RCGR_CONFIG_UPDATE_BIT))
			return;
		udelay(1);
	}

	WARN(count == 0, "%s: rcg didn't update its configuration.",
		rcg->c.dbg_name);
}

/* RCG set rate function for clocks with Half Integer Dividers. */
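/*
 * Encoding note, inferred from set_rate_byte()/set_rate_pixel() below
 * (which compute div = (2 * src_rate / rate) - 1): the 5-bit divider
 * field holds (2 * divider - 1), so a register value of 3 means
 * divide-by-2 and a value of 2 means divide-by-1.5; values of 0 and 1
 * leave the input undivided.
 */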
void set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	u32 cfg_regval;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
	cfg_regval |= nf->div_src_val;
	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));

	rcg_update_config(rcg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

/* RCG set rate function for clocks with MND & Half Integer Dividers. */
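/*
 * With the M/N:D counter active, the output rate is the HID-divided
 * input scaled by M/N. In the frequency tables used here, n_val holds
 * the one's complement of (N - M) and d_val the one's complement of N
 * (see pixel_rcg_handoff(), which reverses this encoding); n_val == 0
 * bypasses the counter.
 */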
void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	u32 cfg_regval;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	writel_relaxed(nf->m_val, M_REG(rcg));
	writel_relaxed(nf->n_val, N_REG(rcg));
	writel_relaxed(nf->d_val, D_REG(rcg));

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
	cfg_regval |= nf->div_src_val;

	/* Activate or disable the M/N:D divider as necessary */
	cfg_regval &= ~MND_MODE_MASK;
	if (nf->n_val != 0)
		cfg_regval |= MND_DUAL_EDGE_MODE_BVAL;
	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));

	rcg_update_config(rcg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static int rcg_clk_prepare(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	WARN(rcg->current_freq == &rcg_dummy_freq,
		"Attempting to prepare %s before setting its rate. "
		"Set the rate first!\n", rcg->c.dbg_name);

	return 0;
}

static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct clk_freq_tbl *cf, *nf;
	struct rcg_clk *rcg = to_rcg_clk(c);
	int rc;
	unsigned long flags;

	for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	cf = rcg->current_freq;

	rc = __clk_pre_reparent(c, nf->src_clk, &flags);
	if (rc)
		return rc;

	BUG_ON(!rcg->set_rate);

	/* Perform clock-specific frequency switch operations. */
	rcg->set_rate(rcg, nf);
	rcg->current_freq = nf;
	c->parent = nf->src_clk;

	__clk_post_reparent(c, cf->src_clk, &flags);

	return 0;
}

/*
 * Return a supported rate that's at least the specified rate, or the max
 * supported rate if the specified rate is larger than the max supported
 * rate.
 */
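/*
 * Example (illustrative table): with supported rates of 19.2, 75 and
 * 150 MHz, a request for 80 MHz rounds up to 150 MHz, and a request for
 * 200 MHz returns the 150 MHz maximum.
 */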
static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk_freq_tbl *f;

	for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
		if (f->freq_hz >= rate)
			return f->freq_hz;

	f--;
	return f->freq_hz;
}

/* Return the nth supported frequency for a given clock. */
static long rcg_clk_list_rate(struct clk *c, unsigned n)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
		return -ENXIO;

	return (rcg->freq_tbl + n)->freq_hz;
}

static struct clk *_rcg_clk_get_parent(struct rcg_clk *rcg, int has_mnd)
{
	u32 n_regval = 0, m_regval = 0, d_regval = 0;
	u32 cfg_regval;
	struct clk_freq_tbl *freq;
	u32 cmd_rcgr_regval;

	/* Is there a pending configuration? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
		return NULL;

	/* Get values of m, n, d, div and src_sel registers. */
	if (has_mnd) {
		m_regval = readl_relaxed(M_REG(rcg));
		n_regval = readl_relaxed(N_REG(rcg));
		d_regval = readl_relaxed(D_REG(rcg));

		/*
		 * The n and d values stored in the frequency tables are sign
		 * extended to 32 bits. The n and d values in the registers
		 * are truncated to 8 or 16 bits. Sign extend the values read
		 * from the registers so that they can be compared to the
		 * values in the frequency tables.
		 */
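		/*
		 * Worked example (8-bit case): a table n_val of ~3 =
		 * 0xFFFFFFFC reads back from the register as 0xFC; since
		 * (0xFC >> 8) == 0, OR-ing in BM(31, 8) restores 0xFFFFFFFC
		 * and the table comparison below matches.
		 */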
		n_regval |= (n_regval >> 8) ? BM(31, 16) : BM(31, 8);
		d_regval |= (d_regval >> 8) ? BM(31, 16) : BM(31, 8);
	}

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= CFG_RCGR_SRC_SEL_MASK | CFG_RCGR_DIV_MASK
				| MND_MODE_MASK;

	/* If mnd counter is present, check if it's in use. */
	has_mnd = (has_mnd) &&
		((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL);

	/*
	 * Clear out the mn counter mode bits since we now want to compare only
	 * the source mux selection and pre-divider values in the registers.
	 */
	cfg_regval &= ~MND_MODE_MASK;

	/* Figure out what rate the rcg is running at */
	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
		if (freq->div_src_val != cfg_regval)
			continue;
		if (has_mnd) {
			if (freq->m_val != m_regval)
				continue;
			if (freq->n_val != n_regval)
				continue;
			if (freq->d_val != d_regval)
				continue;
		}
		break;
	}

	/* No known frequency found */
	if (freq->freq_hz == FREQ_END)
		return NULL;

	rcg->current_freq = freq;
	return freq->src_clk;
}

static enum handoff _rcg_clk_handoff(struct rcg_clk *rcg)
{
	u32 cmd_rcgr_regval;

	if (rcg->current_freq && rcg->current_freq->freq_hz != FREQ_END)
		rcg->c.rate = rcg->current_freq->freq_hz;

	/* Is the root enabled? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if ((cmd_rcgr_regval & CMD_RCGR_ROOT_STATUS_BIT))
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

static struct clk *rcg_mnd_clk_get_parent(struct clk *c)
{
	return _rcg_clk_get_parent(to_rcg_clk(c), 1);
}

static struct clk *rcg_clk_get_parent(struct clk *c)
{
	return _rcg_clk_get_parent(to_rcg_clk(c), 0);
}

static enum handoff rcg_mnd_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c));
}

static enum handoff rcg_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c));
}

#define BRANCH_CHECK_MASK	BM(31, 28)
#define BRANCH_ON_VAL		BVAL(31, 28, 0x0)
#define BRANCH_OFF_VAL		BVAL(31, 28, 0x8)
#define BRANCH_NOC_FSM_ON_VAL	BVAL(31, 28, 0x2)

/*
 * Branch clock functions
 */
static void branch_clk_halt_check(u32 halt_check, const char *clk_name,
				void __iomem *cbcr_reg,
				enum branch_state br_status)
{
	char *status_str = (br_status == BRANCH_ON) ? "off" : "on";

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers. It's also needed in the udelay() case to ensure
	 * the delay starts after the branch disable.
	 */
	mb();

	if (halt_check == DELAY || halt_check == HALT_VOTED) {
		udelay(HALT_CHECK_DELAY_US);
	} else if (halt_check == HALT) {
		int count;
		u32 val;

		for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
			val = readl_relaxed(cbcr_reg);
			val &= BRANCH_CHECK_MASK;
			switch (br_status) {
			case BRANCH_ON:
				if (val == BRANCH_ON_VAL
					|| val == BRANCH_NOC_FSM_ON_VAL)
					return;
				break;

			case BRANCH_OFF:
				if (val == BRANCH_OFF_VAL)
					return;
				break;
			}
			udelay(1);
		}
		WARN(count == 0, "%s status stuck %s", clk_name, status_str);
	}
}

static int branch_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 cbcr_val;
	struct branch_clk *branch = to_branch_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cbcr_val = readl_relaxed(CBCR_REG(branch));
	cbcr_val |= CBCR_BRANCH_ENABLE_BIT;
	writel_relaxed(cbcr_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to enable before continuing. */
	branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
				CBCR_REG(branch), BRANCH_ON);

	return 0;
}

static void branch_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct branch_clk *branch = to_branch_clk(c);
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(CBCR_REG(branch));
	reg_val &= ~CBCR_BRANCH_ENABLE_BIT;
	writel_relaxed(reg_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to disable before continuing. */
	branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
				CBCR_REG(branch), BRANCH_OFF);
}

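/*
 * For branch clocks with a local divider, the "rate" passed in below is
 * the raw divider value programmed into the CBCR CDIV field, not a
 * frequency in Hz; branch_clk_handoff() likewise reports that field back
 * as the clock's rate.
 */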
static int branch_cdiv_set_rate(struct branch_clk *branch, unsigned long rate)
{
	unsigned long flags;
	u32 regval;

	if (rate > branch->max_div)
		return -EINVAL;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	regval = readl_relaxed(CBCR_REG(branch));
	regval &= ~BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
	regval |= BVAL(CBCR_CDIV_MSB, CBCR_CDIV_LSB, rate);
	writel_relaxed(regval, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

static int branch_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return branch_cdiv_set_rate(branch, rate);

	if (!branch->has_sibling)
		return clk_set_rate(c->parent, rate);

	return -EPERM;
}

static long branch_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return rate <= (branch->max_div) ? rate : -EPERM;

	if (!branch->has_sibling)
		return clk_round_rate(c->parent, rate);

	return -EPERM;
}

static unsigned long branch_clk_get_rate(struct clk *c)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return branch->c.rate;

	return clk_get_rate(c->parent);
}

Vikram Mulukutla8810e342011-10-20 20:26:53 -0700463{
Patrick Daly13e22ed2012-10-11 14:31:11 -0700464 int level, fmax = 0, rate;
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700465 struct branch_clk *branch = to_branch_clk(c);
Patrick Daly13e22ed2012-10-11 14:31:11 -0700466 struct clk *parent = c->parent;
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700467
468 if (branch->has_sibling == 1)
469 return -ENXIO;
470
Patrick Daly13e22ed2012-10-11 14:31:11 -0700471 if (!parent || !parent->ops->list_rate)
472 return -ENXIO;
473
474 /* Find max frequency supported within voltage constraints. */
475 if (!parent->vdd_class) {
476 fmax = INT_MAX;
477 } else {
478 for (level = 0; level < parent->num_fmax; level++)
479 if (parent->fmax[level])
480 fmax = parent->fmax[level];
481 }
482
483 rate = parent->ops->list_rate(parent, n);
484 if (rate <= fmax)
485 return rate;
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700486 else
Vikram Mulukutlae0589fc2012-09-20 12:19:16 -0700487 return -ENXIO;
Vikram Mulukutla8810e342011-10-20 20:26:53 -0700488}
489
static enum handoff branch_clk_handoff(struct clk *c)
{
	struct branch_clk *branch = to_branch_clk(c);
	u32 cbcr_regval;

	cbcr_regval = readl_relaxed(CBCR_REG(branch));
	if ((cbcr_regval & CBCR_BRANCH_OFF_BIT))
		return HANDOFF_DISABLED_CLK;

	if (branch->max_div) {
		cbcr_regval &= BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
		cbcr_regval >>= CBCR_CDIV_LSB;
		c->rate = cbcr_regval;
	} else if (!branch->has_sibling) {
		c->rate = clk_get_rate(c->parent);
	}

	return HANDOFF_ENABLED_CLK;
}

static int __branch_clk_reset(void __iomem *bcr_reg,
			enum clk_reset_action action)
{
	int ret = 0;
	unsigned long flags;
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(bcr_reg);
	switch (action) {
	case CLK_RESET_ASSERT:
		reg_val |= BCR_BLK_ARES_BIT;
		break;
	case CLK_RESET_DEASSERT:
		reg_val &= ~BCR_BLK_ARES_BIT;
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(reg_val, bcr_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Make sure write is issued before returning. */
	mb();

	return ret;
}

static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (!branch->bcr_reg)
		return -EPERM;
	return __branch_clk_reset(BCR_REG(branch), action);
}

static int branch_clk_set_flags(struct clk *c, unsigned flags)
{
	u32 cbcr_val;
	unsigned long irq_flags;
	struct branch_clk *branch = to_branch_clk(c);
	int ret = 0;

	spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
	cbcr_val = readl_relaxed(CBCR_REG(branch));
	switch (flags) {
	case CLKFLAG_RETAIN_PERIPH:
		cbcr_val |= BIT(13);
		break;
	case CLKFLAG_NORETAIN_PERIPH:
		cbcr_val &= ~BIT(13);
		break;
	case CLKFLAG_RETAIN_MEM:
		cbcr_val |= BIT(14);
		break;
	case CLKFLAG_NORETAIN_MEM:
		cbcr_val &= ~BIT(14);
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(cbcr_val, CBCR_REG(branch));
	/*
	 * 8974v2.2 has a requirement that writes to set bits 13 and 14 are
	 * separated by at least 2 bus cycles. Cover one of these cycles by
	 * performing an extra write here. The other cycle is covered by the
	 * read-modify-write design of this function.
	 */
	writel_relaxed(cbcr_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, irq_flags);

	/* Make sure write is issued before returning. */
	mb();

	return ret;
}

/*
 * Votable clock functions
 */
static int local_vote_clk_reset(struct clk *c, enum clk_reset_action action)
{
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	if (!vclk->bcr_reg) {
		WARN(1, "clk_reset called on an unsupported clock (%s)\n",
			c->dbg_name);
		return -EPERM;
	}
	return __branch_clk_reset(BCR_REG(vclk), action);
}

static int local_vote_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 ena;
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(VOTE_REG(vclk));
	ena |= vclk->en_mask;
	writel_relaxed(ena, VOTE_REG(vclk));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	branch_clk_halt_check(vclk->halt_check, c->dbg_name, CBCR_REG(vclk),
				BRANCH_ON);

	return 0;
}

static void local_vote_clk_disable(struct clk *c)
{
	unsigned long flags;
	u32 ena;
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(VOTE_REG(vclk));
	ena &= ~vclk->en_mask;
	writel_relaxed(ena, VOTE_REG(vclk));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static enum handoff local_vote_clk_handoff(struct clk *c)
{
	struct local_vote_clk *vclk = to_local_vote_clk(c);
	u32 vote_regval;

	/* Is the branch voted on by apps? */
	vote_regval = readl_relaxed(VOTE_REG(vclk));
	if (!(vote_regval & vclk->en_mask))
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

enum handoff byte_rcg_handoff(struct clk *clk)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	u32 div_val;
	unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);

	/* If the pre-divider is used, find the rate after the division */
	div_val = readl_relaxed(CFG_RCGR_REG(rcg)) & CFG_RCGR_DIV_MASK;
	if (div_val > 1)
		pre_div_rate = parent_rate / ((div_val + 1) >> 1);
	else
		pre_div_rate = parent_rate;

	clk->rate = pre_div_rate;

	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

static int set_rate_byte(struct clk *clk, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	struct clk *pll = clk->parent;
	unsigned long source_rate, div;
	struct clk_freq_tbl *byte_freq = rcg->current_freq;
	int rc;

	if (rate == 0)
		return -EINVAL;

	rc = clk_set_rate(pll, rate);
	if (rc)
		return rc;

	source_rate = clk_round_rate(pll, rate);
	if ((2 * source_rate) % rate)
		return -EINVAL;

	div = ((2 * source_rate)/rate) - 1;
	if (div > CFG_RCGR_DIV_MASK)
		return -EINVAL;

	byte_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
	byte_freq->div_src_val |= BVAL(4, 0, div);
	set_rate_hid(rcg, byte_freq);

	return 0;
}
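/*
 * Worked example with illustrative numbers: if the PLL settles at
 * 750 MHz and the requested byte clock rate is 93.75 MHz, then
 * div = (2 * 750 / 93.75) - 1 = 15, i.e. a half-integer divider of
 * (15 + 1) / 2 = 8, and 750 MHz / 8 = 93.75 MHz exactly.
 */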

enum handoff pixel_rcg_handoff(struct clk *clk)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	u32 div_val = 0, mval = 0, nval = 0, cfg_regval;
	unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));

	/* If the pre-divider is used, find the rate after the division */
	div_val = cfg_regval & CFG_RCGR_DIV_MASK;
	if (div_val > 1)
		pre_div_rate = parent_rate / ((div_val + 1) >> 1);
	else
		pre_div_rate = parent_rate;

	clk->rate = pre_div_rate;

	/*
	 * Pixel clocks have one frequency entry in their frequency table.
	 * Update that entry.
	 */
	if (rcg->current_freq) {
		rcg->current_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
		rcg->current_freq->div_src_val |= div_val;
	}

	/* If MND is used, find the rate after the MND division */
	if ((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL) {
		mval = readl_relaxed(M_REG(rcg));
		nval = readl_relaxed(N_REG(rcg));
		if (!nval)
			return HANDOFF_DISABLED_CLK;
		nval = (~nval) + mval;
		if (rcg->current_freq) {
			rcg->current_freq->n_val = ~(nval - mval);
			rcg->current_freq->m_val = mval;
			rcg->current_freq->d_val = ~nval;
		}
		clk->rate = (pre_div_rate * mval) / nval;
	}

	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

static int set_rate_pixel(struct clk *clk, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	struct clk *pll = clk->parent;
	unsigned long source_rate, div;
	struct clk_freq_tbl *pixel_freq = rcg->current_freq;
	int rc;

	if (rate == 0)
		return -EINVAL;

	rc = clk_set_rate(pll, rate);
	if (rc)
		return rc;

	source_rate = clk_round_rate(pll, rate);
	if ((2 * source_rate) % rate)
		return -EINVAL;

	div = ((2 * source_rate)/rate) - 1;
	if (div > CFG_RCGR_DIV_MASK)
		return -EINVAL;

	pixel_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
	pixel_freq->div_src_val |= BVAL(4, 0, div);
	set_rate_mnd(rcg, pixel_freq);

	return 0;
}

/*
 * Unlike other clocks, the HDMI rate is adjusted through PLL
 * re-programming. It is also routed through an HID divider.
 */
static int rcg_clk_set_rate_hdmi(struct clk *c, unsigned long rate)
{
	struct clk_freq_tbl *nf;
	struct rcg_clk *rcg = to_rcg_clk(c);
	int rc;

	for (nf = rcg->freq_tbl; nf->freq_hz != rate; nf++)
		if (nf->freq_hz == FREQ_END) {
			rc = -EINVAL;
			goto out;
		}

	rc = clk_set_rate(nf->src_clk, rate);
	if (rc < 0)
		goto out;
	set_rate_hid(rcg, nf);

	rcg->current_freq = nf;
	c->parent = nf->src_clk;
out:
	return rc;
}

#define ENABLE_REG(x)	(*(x)->base + (x)->enable_reg)
#define SELECT_REG(x)	(*(x)->base + (x)->select_reg)

/*
 * Mux clock functions
 */
static void cam_mux_clk_halt_check(void)
{
	/* Ensure that the delay starts after the mux disable/enable. */
	mb();
	udelay(HALT_CHECK_DELAY_US);
}

static int cam_mux_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 regval;
	struct cam_mux_clk *mux = to_cam_mux_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	regval = readl_relaxed(ENABLE_REG(mux));
	regval |= mux->enable_mask;
	writel_relaxed(regval, ENABLE_REG(mux));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to enable before continuing. */
	cam_mux_clk_halt_check();

	return 0;
}

static void cam_mux_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	u32 regval;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	regval = readl_relaxed(ENABLE_REG(mux));
	regval &= ~mux->enable_mask;
	writel_relaxed(regval, ENABLE_REG(mux));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to disable before continuing. */
	cam_mux_clk_halt_check();
}

static int mux_source_switch(struct cam_mux_clk *mux, struct mux_source *dest)
{
	unsigned long flags;
	u32 regval;
	int ret = 0;

	ret = __clk_pre_reparent(&mux->c, dest->clk, &flags);
	if (ret)
		goto out;

	regval = readl_relaxed(SELECT_REG(mux));
	regval &= ~mux->select_mask;
	regval |= dest->select_val;
	writel_relaxed(regval, SELECT_REG(mux));

	/* Make sure switch request goes through before proceeding. */
	mb();

	__clk_post_reparent(&mux->c, mux->c.parent, &flags);
out:
	return ret;
}
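/*
 * The switch above is bracketed by __clk_pre_reparent() and
 * __clk_post_reparent() from <mach/clk-provider.h>. As used here, the
 * pre-step is expected to vote for and enable the destination source
 * before the mux moves, and the post-step to drop the vote on the old
 * source, so the output never runs off an unprepared parent; the exact
 * contract is defined by the clk-provider implementation.
 */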

static int cam_mux_clk_set_parent(struct clk *c, struct clk *parent)
{
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	struct mux_source *dest = NULL;
	int ret;

	if (!mux->sources || !parent)
		return -EPERM;

	dest = mux->sources;

	while (dest->clk) {
		if (dest->clk == parent)
			break;
		dest++;
	}

	if (!dest->clk)
		return -EPERM;

	ret = mux_source_switch(mux, dest);
	if (ret)
		return ret;

	mux->c.rate = clk_get_rate(dest->clk);

	return 0;
}

static enum handoff cam_mux_clk_handoff(struct clk *c)
{
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	u32 mask = mux->enable_mask;
	u32 regval = readl_relaxed(ENABLE_REG(mux));

	c->rate = clk_get_rate(c->parent);

	if (mask == (regval & mask))
		return HANDOFF_ENABLED_CLK;

	return HANDOFF_DISABLED_CLK;
}

static struct clk *cam_mux_clk_get_parent(struct clk *c)
{
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	struct mux_source *parent = NULL;
	u32 regval = readl_relaxed(SELECT_REG(mux));

	if (!mux->sources)
		return ERR_PTR(-EPERM);

	parent = mux->sources;

	while (parent->clk) {
		if ((regval & mux->select_mask) == parent->select_val)
			return parent->clk;

		parent++;
	}

	return ERR_PTR(-EPERM);
}

static long cam_mux_clk_list_rate(struct clk *c, unsigned n)
{
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	int i;

	for (i = 0; i < n; i++)
		if (!mux->sources[i].clk)
			break;

	if (!mux->sources[i].clk)
		return -ENXIO;

	return clk_get_rate(mux->sources[i].clk);
}

struct clk_ops clk_ops_empty;

struct clk_ops clk_ops_rcg = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_clk_handoff,
	.get_parent = rcg_clk_get_parent,
};

struct clk_ops clk_ops_rcg_mnd = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_mnd_clk_handoff,
	.get_parent = rcg_mnd_clk_get_parent,
};

struct clk_ops clk_ops_pixel = {
	.enable = rcg_clk_prepare,
	.set_rate = set_rate_pixel,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = pixel_rcg_handoff,
};

struct clk_ops clk_ops_byte = {
	.enable = rcg_clk_prepare,
	.set_rate = set_rate_byte,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = byte_rcg_handoff,
};

struct clk_ops clk_ops_rcg_hdmi = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate_hdmi,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_clk_handoff,
	.get_parent = rcg_clk_get_parent,
};

struct clk_ops clk_ops_branch = {
	.enable = branch_clk_enable,
	.disable = branch_clk_disable,
	.set_rate = branch_clk_set_rate,
	.get_rate = branch_clk_get_rate,
	.list_rate = branch_clk_list_rate,
	.round_rate = branch_clk_round_rate,
	.reset = branch_clk_reset,
	.set_flags = branch_clk_set_flags,
	.handoff = branch_clk_handoff,
};

struct clk_ops clk_ops_vote = {
	.enable = local_vote_clk_enable,
	.disable = local_vote_clk_disable,
	.reset = local_vote_clk_reset,
	.handoff = local_vote_clk_handoff,
};

struct clk_ops clk_ops_cam_mux = {
	.enable = cam_mux_clk_enable,
	.disable = cam_mux_clk_disable,
	.set_parent = cam_mux_clk_set_parent,
	.get_parent = cam_mux_clk_get_parent,
	.handoff = cam_mux_clk_handoff,
	.list_rate = cam_mux_clk_list_rate,
};
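
/*
 * Illustrative sketch of how these ops tables are consumed by SoC clock
 * definitions. Field names follow the accessors used in this file
 * (base, cbcr_reg, bcr_reg, halt_check, c.dbg_name, c.ops); the offsets
 * and the clock itself are hypothetical, and the authoritative layouts
 * live in clock-local2.h and the per-SoC clock tables:
 *
 *	static struct branch_clk example_ahb_clk = {
 *		.cbcr_reg = 0x1008,		// hypothetical offset
 *		.bcr_reg = 0x1000,		// hypothetical offset
 *		.base = &virt_base,		// mapped controller base
 *		.halt_check = HALT,
 *		.c = {
 *			.dbg_name = "example_ahb_clk",
 *			.ops = &clk_ops_branch,
 *		},
 *	};
 */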