/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <mach/clk.h>
#include <mach/clk-provider.h>

#include "clock-local2.h"

/*
 * When enabling/disabling a clock, check the halt bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define HALT_CHECK_MAX_LOOPS	500
/* For clocks without halt checking, wait this long after enables/disables. */
#define HALT_CHECK_DELAY_US	10

/*
 * When updating an RCG configuration, check the update bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define UPDATE_CHECK_MAX_LOOPS	500

DEFINE_SPINLOCK(local_clock_reg_lock);
struct clk_freq_tbl rcg_dummy_freq = F_END;

#define CMD_RCGR_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg)
#define CFG_RCGR_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x4)
#define M_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x8)
#define N_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0xC)
#define D_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x10)
#define CBCR_REG(x)		(*(x)->base + (x)->cbcr_reg)
#define BCR_REG(x)		(*(x)->base + (x)->bcr_reg)
#define VOTE_REG(x)		(*(x)->base + (x)->vote_reg)

/*
 * Important clock bit positions and masks
 */
#define CMD_RCGR_ROOT_ENABLE_BIT	BIT(1)
#define CBCR_BRANCH_ENABLE_BIT		BIT(0)
#define CBCR_BRANCH_OFF_BIT		BIT(31)
#define CMD_RCGR_CONFIG_UPDATE_BIT	BIT(0)
#define CMD_RCGR_ROOT_STATUS_BIT	BIT(31)
#define BCR_BLK_ARES_BIT		BIT(0)
#define CBCR_HW_CTL_BIT			BIT(1)
#define CFG_RCGR_DIV_MASK		BM(4, 0)
#define CFG_RCGR_SRC_SEL_MASK		BM(10, 8)
#define MND_MODE_MASK			BM(13, 12)
#define MND_DUAL_EDGE_MODE_BVAL		BVAL(13, 12, 0x2)
#define CMD_RCGR_CONFIG_DIRTY_MASK	BM(7, 4)
#define CBCR_CDIV_LSB			16
#define CBCR_CDIV_MSB			24

enum branch_state {
	BRANCH_ON,
	BRANCH_OFF,
};

/*
 * RCG functions
 */

/*
 * Update an RCG with a new configuration. This may include a new M, N, or D
 * value, source selection or pre-divider value.
 */
static void rcg_update_config(struct rcg_clk *rcg)
{
	u32 cmd_rcgr_regval, count;

	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	cmd_rcgr_regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));

	/* Wait for update to take effect */
	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
		if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
				CMD_RCGR_CONFIG_UPDATE_BIT))
			return;
		udelay(1);
	}

	WARN(count == 0, "%s: rcg didn't update its configuration.",
		rcg->c.dbg_name);
}

/* RCG set rate function for clocks with Half Integer Dividers. */
void set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	u32 cfg_regval;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
	cfg_regval |= nf->div_src_val;
	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));

	rcg_update_config(rcg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

/* RCG set rate function for clocks with MND & Half Integer Dividers. */
void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	u32 cfg_regval;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	writel_relaxed(nf->m_val, M_REG(rcg));
	writel_relaxed(nf->n_val, N_REG(rcg));
	writel_relaxed(nf->d_val, D_REG(rcg));

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
	cfg_regval |= nf->div_src_val;

	/* Activate or disable the M/N:D divider as necessary */
	cfg_regval &= ~MND_MODE_MASK;
	if (nf->n_val != 0)
		cfg_regval |= MND_DUAL_EDGE_MODE_BVAL;
	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));

	rcg_update_config(rcg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static int rcg_clk_prepare(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	WARN(rcg->current_freq == &rcg_dummy_freq,
		"Attempting to prepare %s before setting its rate. "
		"Set the rate first!\n", rcg->c.dbg_name);

	return 0;
}

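/*
 * Set an RCG's rate by looking the rate up in its frequency table, then
 * switching sources under the __clk_pre/post_reparent() helpers so the
 * new source is enabled before the switch and the old one released after.
 */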
static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct clk_freq_tbl *cf, *nf;
	struct rcg_clk *rcg = to_rcg_clk(c);
	int rc;
	unsigned long flags;

	for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	cf = rcg->current_freq;

	rc = __clk_pre_reparent(c, nf->src_clk, &flags);
	if (rc)
		return rc;

	BUG_ON(!rcg->set_rate);

	/* Perform clock-specific frequency switch operations. */
	rcg->set_rate(rcg, nf);
	rcg->current_freq = nf;
	c->parent = nf->src_clk;

	__clk_post_reparent(c, cf->src_clk, &flags);

	return 0;
}

/*
 * Return the lowest supported rate that is at least the requested rate,
 * or the maximum supported rate if the request exceeds it.
 */
static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk_freq_tbl *f;

	for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
		if (f->freq_hz >= rate)
			return f->freq_hz;

	f--;
	return f->freq_hz;
}

/* Return the nth supported frequency for a given clock. */
static long rcg_clk_list_rate(struct clk *c, unsigned n)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
		return -ENXIO;

	return (rcg->freq_tbl + n)->freq_hz;
}

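/*
 * Determine which source an RCG is currently running from by matching the
 * hardware's source select, pre-divider and (optionally) M/N:D register
 * values against its frequency table. Returns NULL if a configuration
 * update is still pending or no table entry matches.
 */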
static struct clk *_rcg_clk_get_parent(struct rcg_clk *rcg, int has_mnd)
{
	u32 n_regval = 0, m_regval = 0, d_regval = 0;
	u32 cfg_regval;
	struct clk_freq_tbl *freq;
	u32 cmd_rcgr_regval;

	/* Is there a pending configuration? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
		return NULL;

	/* Get values of m, n, d, div and src_sel registers. */
	if (has_mnd) {
		m_regval = readl_relaxed(M_REG(rcg));
		n_regval = readl_relaxed(N_REG(rcg));
		d_regval = readl_relaxed(D_REG(rcg));

		/*
		 * The n and d values stored in the frequency tables are sign
		 * extended to 32 bits. The n and d values in the registers are
		 * sign extended to 8 or 16 bits. Sign extend the values read
		 * from the registers so that they can be compared to the
		 * values in the frequency tables.
		 */
		n_regval |= (n_regval >> 8) ? BM(31, 16) : BM(31, 8);
		d_regval |= (d_regval >> 8) ? BM(31, 16) : BM(31, 8);
	}

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= CFG_RCGR_SRC_SEL_MASK | CFG_RCGR_DIV_MASK
				| MND_MODE_MASK;

	/* If mnd counter is present, check if it's in use. */
	has_mnd = (has_mnd) &&
		((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL);

	/*
	 * Clear out the mn counter mode bits since we now want to compare only
	 * the source mux selection and pre-divider values in the registers.
	 */
	cfg_regval &= ~MND_MODE_MASK;

	/* Figure out what rate the rcg is running at */
	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
		if (freq->div_src_val != cfg_regval)
			continue;
		if (has_mnd) {
			if (freq->m_val != m_regval)
				continue;
			if (freq->n_val != n_regval)
				continue;
			if (freq->d_val != d_regval)
				continue;
		}
		break;
	}

	/* No known frequency found */
	if (freq->freq_hz == FREQ_END)
		return NULL;

	rcg->current_freq = freq;
	return freq->src_clk;
}

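/*
 * Report an RCG's boot-time state: publish the rate cached by the
 * get_parent() lookup and check the root status bit to see whether the
 * RCG is running.
 */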
static enum handoff _rcg_clk_handoff(struct rcg_clk *rcg)
{
	u32 cmd_rcgr_regval;

	if (rcg->current_freq && rcg->current_freq->freq_hz != FREQ_END)
		rcg->c.rate = rcg->current_freq->freq_hz;

	/* Is the root enabled? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if ((cmd_rcgr_regval & CMD_RCGR_ROOT_STATUS_BIT))
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

static struct clk *rcg_mnd_clk_get_parent(struct clk *c)
{
	return _rcg_clk_get_parent(to_rcg_clk(c), 1);
}

static struct clk *rcg_clk_get_parent(struct clk *c)
{
	return _rcg_clk_get_parent(to_rcg_clk(c), 0);
}

static enum handoff rcg_mnd_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c));
}

static enum handoff rcg_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c));
}

#define BRANCH_CHECK_MASK	BM(31, 28)
#define BRANCH_ON_VAL		BVAL(31, 28, 0x0)
#define BRANCH_OFF_VAL		BVAL(31, 28, 0x8)
#define BRANCH_NOC_FSM_ON_VAL	BVAL(31, 28, 0x2)

/*
 * Branch clock functions
 */
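/*
 * Wait for a branch to reach the requested state, either by polling its
 * status field or by a fixed delay, depending on the halt_check method.
 */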
static void branch_clk_halt_check(u32 halt_check, const char *clk_name,
				void __iomem *cbcr_reg,
				enum branch_state br_status)
{
	char *status_str = (br_status == BRANCH_ON) ? "off" : "on";

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers. It's also needed in the udelay() case to ensure
	 * the delay starts after the branch disable.
	 */
	mb();

	if (halt_check == DELAY || halt_check == HALT_VOTED) {
		udelay(HALT_CHECK_DELAY_US);
	} else if (halt_check == HALT) {
		int count;
		u32 val;
		for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
			val = readl_relaxed(cbcr_reg);
			val &= BRANCH_CHECK_MASK;
			switch (br_status) {
			case BRANCH_ON:
				if (val == BRANCH_ON_VAL
					|| val == BRANCH_NOC_FSM_ON_VAL)
					return;
				break;

			case BRANCH_OFF:
				if (val == BRANCH_OFF_VAL)
					return;
				break;
			}
			udelay(1);
		}
		WARN(count == 0, "%s status stuck %s", clk_name, status_str);
	}
}

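/*
 * Enable a branch by setting its CBCR enable bit and waiting for the
 * branch to come up; disable clears the bit and waits for it to halt.
 */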
static int branch_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 cbcr_val;
	struct branch_clk *branch = to_branch_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cbcr_val = readl_relaxed(CBCR_REG(branch));
	cbcr_val |= CBCR_BRANCH_ENABLE_BIT;
	writel_relaxed(cbcr_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to enable before continuing. */
	branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
				CBCR_REG(branch), BRANCH_ON);

	return 0;
}

static void branch_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct branch_clk *branch = to_branch_clk(c);
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(CBCR_REG(branch));
	reg_val &= ~CBCR_BRANCH_ENABLE_BIT;
	writel_relaxed(reg_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to disable before continuing. */
	branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
				CBCR_REG(branch), BRANCH_OFF);
}

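/*
 * For branches with a local CDIV divider, the "rate" passed in is the
 * divider value itself; program it into the CDIV field of the CBCR.
 */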
static int branch_cdiv_set_rate(struct branch_clk *branch, unsigned long rate)
{
	unsigned long flags;
	u32 regval;

	if (rate > branch->max_div)
		return -EINVAL;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	regval = readl_relaxed(CBCR_REG(branch));
	regval &= ~BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
	regval |= BVAL(CBCR_CDIV_MSB, CBCR_CDIV_LSB, rate);
	writel_relaxed(regval, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

static int branch_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return branch_cdiv_set_rate(branch, rate);

	if (!branch->has_sibling)
		return clk_set_rate(c->parent, rate);

	return -EPERM;
}

static long branch_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return rate <= (branch->max_div) ? rate : -EPERM;

	if (!branch->has_sibling)
		return clk_round_rate(c->parent, rate);

	return -EPERM;
}

static unsigned long branch_clk_get_rate(struct clk *c)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return branch->c.rate;

	return clk_get_rate(c->parent);
}

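/*
 * List the parent's supported rates, capped at the highest rate the
 * parent can run at within its voltage (fmax) constraints.
 */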
static long branch_clk_list_rate(struct clk *c, unsigned n)
{
	int level, fmax = 0, rate;
	struct branch_clk *branch = to_branch_clk(c);
	struct clk *parent = c->parent;

	if (branch->has_sibling == 1)
		return -ENXIO;

	if (!parent || !parent->ops->list_rate)
		return -ENXIO;

	/* Find max frequency supported within voltage constraints. */
	if (!parent->vdd_class) {
		fmax = INT_MAX;
	} else {
		for (level = 0; level < parent->num_fmax; level++)
			if (parent->fmax[level])
				fmax = parent->fmax[level];
	}

	rate = parent->ops->list_rate(parent, n);
	if (rate <= fmax)
		return rate;
	else
		return -ENXIO;
}

static enum handoff branch_clk_handoff(struct clk *c)
{
	struct branch_clk *branch = to_branch_clk(c);
	u32 cbcr_regval;

	cbcr_regval = readl_relaxed(CBCR_REG(branch));
	if ((cbcr_regval & CBCR_BRANCH_OFF_BIT))
		return HANDOFF_DISABLED_CLK;

	if (branch->max_div) {
		cbcr_regval &= BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
		cbcr_regval >>= CBCR_CDIV_LSB;
		c->rate = cbcr_regval;
	} else if (!branch->has_sibling) {
		c->rate = clk_get_rate(c->parent);
	}

	return HANDOFF_ENABLED_CLK;
}

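/* Assert or deassert a block reset (BCR) and ensure the write has posted. */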
static int __branch_clk_reset(void __iomem *bcr_reg,
				enum clk_reset_action action)
{
	int ret = 0;
	unsigned long flags;
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(bcr_reg);
	switch (action) {
	case CLK_RESET_ASSERT:
		reg_val |= BCR_BLK_ARES_BIT;
		break;
	case CLK_RESET_DEASSERT:
		reg_val &= ~BCR_BLK_ARES_BIT;
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(reg_val, bcr_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Make sure write is issued before returning. */
	mb();

	return ret;
}

static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (!branch->bcr_reg)
		return -EPERM;
	return __branch_clk_reset(BCR_REG(branch), action);
}

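/*
 * Set or clear the CBCR's peripheral (bit 13) and memory (bit 14)
 * retention bits. Enabling retention is followed by a 1 us delay so the
 * change takes effect before the lock is released.
 */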
static int branch_clk_set_flags(struct clk *c, unsigned flags)
{
	u32 cbcr_val;
	unsigned long irq_flags;
	struct branch_clk *branch = to_branch_clk(c);
	int delay_us = 0, ret = 0;

	spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
	cbcr_val = readl_relaxed(CBCR_REG(branch));
	switch (flags) {
	case CLKFLAG_RETAIN_PERIPH:
		cbcr_val |= BIT(13);
		delay_us = 1;
		break;
	case CLKFLAG_NORETAIN_PERIPH:
		cbcr_val &= ~BIT(13);
		break;
	case CLKFLAG_RETAIN_MEM:
		cbcr_val |= BIT(14);
		delay_us = 1;
		break;
	case CLKFLAG_NORETAIN_MEM:
		cbcr_val &= ~BIT(14);
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(cbcr_val, CBCR_REG(branch));
	/* Make sure power is enabled before returning. */
	mb();
	udelay(delay_us);

	spin_unlock_irqrestore(&local_clock_reg_lock, irq_flags);

	return ret;
}

/*
 * Voteable clock functions
 */
static int local_vote_clk_reset(struct clk *c, enum clk_reset_action action)
{
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	if (!vclk->bcr_reg) {
		WARN("clk_reset called on an unsupported clock (%s)\n",
			c->dbg_name);
		return -EPERM;
	}
	return __branch_clk_reset(BCR_REG(vclk), action);
}

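/*
 * Vote-type clocks are enabled by setting this master's bit in a shared
 * vote register; the clock stays on while any voter's bit remains set,
 * which is why the disable path does not wait for the branch to halt.
 */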
static int local_vote_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 ena;
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(VOTE_REG(vclk));
	ena |= vclk->en_mask;
	writel_relaxed(ena, VOTE_REG(vclk));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	branch_clk_halt_check(vclk->halt_check, c->dbg_name, CBCR_REG(vclk),
				BRANCH_ON);

	return 0;
}

static void local_vote_clk_disable(struct clk *c)
{
	unsigned long flags;
	u32 ena;
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(VOTE_REG(vclk));
	ena &= ~vclk->en_mask;
	writel_relaxed(ena, VOTE_REG(vclk));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static enum handoff local_vote_clk_handoff(struct clk *c)
{
	struct local_vote_clk *vclk = to_local_vote_clk(c);
	u32 vote_regval;

	/* Is the branch voted on by apps? */
	vote_regval = readl_relaxed(VOTE_REG(vclk));
	if (!(vote_regval & vclk->en_mask))
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

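/*
 * Handoff for byte clocks: derive the running rate from the parent rate
 * and the half-integer pre-divider encoded in the CFG register.
 */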
enum handoff byte_rcg_handoff(struct clk *clk)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	u32 div_val;
	unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);

	/* If the pre-divider is used, find the rate after the division */
	div_val = readl_relaxed(CFG_RCGR_REG(rcg)) & CFG_RCGR_DIV_MASK;
	if (div_val > 1)
		pre_div_rate = parent_rate / ((div_val + 1) >> 1);
	else
		pre_div_rate = parent_rate;

	clk->rate = pre_div_rate;

	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

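/*
 * Byte clock rates are set by reprogramming the source PLL and then
 * applying the half-integer divider (reg = 2 * src_rate / rate - 1)
 * needed to hit the requested rate exactly.
 */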
static int set_rate_byte(struct clk *clk, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	struct clk *pll = clk->parent;
	unsigned long source_rate, div;
	struct clk_freq_tbl *byte_freq = rcg->current_freq;
	int rc;

	if (rate == 0)
		return -EINVAL;

	rc = clk_set_rate(pll, rate);
	if (rc)
		return rc;

	source_rate = clk_round_rate(pll, rate);
	if ((2 * source_rate) % rate)
		return -EINVAL;

	div = ((2 * source_rate)/rate) - 1;
	if (div > CFG_RCGR_DIV_MASK)
		return -EINVAL;

	byte_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
	byte_freq->div_src_val |= BVAL(4, 0, div);
	set_rate_hid(rcg, byte_freq);

	return 0;
}

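/*
 * Handoff for pixel clocks: recover the rate from the pre-divider and,
 * when the M/N:D counter is active, the M and N register values, and sync
 * the clock's single frequency table entry with the hardware.
 */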
enum handoff pixel_rcg_handoff(struct clk *clk)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	u32 div_val = 0, mval = 0, nval = 0, cfg_regval;
	unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));

	/* If the pre-divider is used, find the rate after the division */
	div_val = cfg_regval & CFG_RCGR_DIV_MASK;
	if (div_val > 1)
		pre_div_rate = parent_rate / ((div_val + 1) >> 1);
	else
		pre_div_rate = parent_rate;

	clk->rate = pre_div_rate;

	/*
	 * Pixel clocks have one frequency entry in their frequency table.
	 * Update that entry.
	 */
	if (rcg->current_freq) {
		rcg->current_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
		rcg->current_freq->div_src_val |= div_val;
	}

	/* If MND is used, find the rate after the MND division */
	if ((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL) {
		mval = readl_relaxed(M_REG(rcg));
		nval = readl_relaxed(N_REG(rcg));
		if (!nval)
			return HANDOFF_DISABLED_CLK;
		nval = (~nval) + mval;
		if (rcg->current_freq) {
			rcg->current_freq->n_val = ~(nval - mval);
			rcg->current_freq->m_val = mval;
			rcg->current_freq->d_val = ~nval;
		}
		clk->rate = (pre_div_rate * mval) / nval;
	}

	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

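/*
 * Like set_rate_byte(), but the configuration is applied with
 * set_rate_mnd() so the M/N:D registers are reprogrammed as well.
 */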
static int set_rate_pixel(struct clk *clk, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	struct clk *pll = clk->parent;
	unsigned long source_rate, div;
	struct clk_freq_tbl *pixel_freq = rcg->current_freq;
	int rc;

	if (rate == 0)
		return -EINVAL;

	rc = clk_set_rate(pll, rate);
	if (rc)
		return rc;

	source_rate = clk_round_rate(pll, rate);
	if ((2 * source_rate) % rate)
		return -EINVAL;

	div = ((2 * source_rate)/rate) - 1;
	if (div > CFG_RCGR_DIV_MASK)
		return -EINVAL;

	pixel_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
	pixel_freq->div_src_val |= BVAL(4, 0, div);
	set_rate_mnd(rcg, pixel_freq);

	return 0;
}

/*
 * Unlike other clocks, the HDMI rate is adjusted through PLL
 * re-programming. It is also routed through an HID divider.
 */
static int rcg_clk_set_rate_hdmi(struct clk *c, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk_freq_tbl *nf = rcg->freq_tbl;
	int rc;

	rc = clk_set_rate(nf->src_clk, rate);
	if (rc < 0)
		goto out;
	set_rate_hid(rcg, nf);

	rcg->current_freq = nf;
out:
	return rc;
}

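/*
 * The HDMI RCG's frequency table has a single entry; report its source as
 * the parent and refresh the cached rate from the parent's current rate.
 */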
static struct clk *rcg_hdmi_clk_get_parent(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk_freq_tbl *freq = rcg->freq_tbl;
	u32 cmd_rcgr_regval;

	/* Is there a pending configuration? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
		return NULL;

	rcg->current_freq->freq_hz = clk_get_rate(c->parent);

	return freq->src_clk;
}

#define ENABLE_REG(x)	(*(x)->base + (x)->enable_reg)
#define SELECT_REG(x)	(*(x)->base + (x)->select_reg)

/*
 * mux clock functions
 */
static void cam_mux_clk_halt_check(void)
{
	/* Ensure that the delay starts after the mux disable/enable. */
	mb();
	udelay(HALT_CHECK_DELAY_US);
}

static int cam_mux_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 regval;
	struct cam_mux_clk *mux = to_cam_mux_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	regval = readl_relaxed(ENABLE_REG(mux));
	regval |= mux->enable_mask;
	writel_relaxed(regval, ENABLE_REG(mux));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to enable before continuing. */
	cam_mux_clk_halt_check();

	return 0;
}

static void cam_mux_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	u32 regval;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	regval = readl_relaxed(ENABLE_REG(mux));
	regval &= ~mux->enable_mask;
	writel_relaxed(regval, ENABLE_REG(mux));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to disable before continuing. */
	cam_mux_clk_halt_check();
}

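/*
 * Switch the mux's select field to a new source under the
 * __clk_pre/post_reparent() helpers so both sources stay enabled across
 * the switch.
 */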
static int mux_source_switch(struct cam_mux_clk *mux, struct mux_source *dest)
{
	unsigned long flags;
	u32 regval;
	int ret = 0;

	ret = __clk_pre_reparent(&mux->c, dest->clk, &flags);
	if (ret)
		goto out;

	regval = readl_relaxed(SELECT_REG(mux));
	regval &= ~mux->select_mask;
	regval |= dest->select_val;
	writel_relaxed(regval, SELECT_REG(mux));

	/* Make sure switch request goes through before proceeding. */
	mb();

	__clk_post_reparent(&mux->c, mux->c.parent, &flags);
out:
	return ret;
}

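/*
 * Find the requested parent in the mux's source list, switch to it and
 * inherit its rate.
 */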
static int cam_mux_clk_set_parent(struct clk *c, struct clk *parent)
{
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	struct mux_source *dest = NULL;
	int ret;

	if (!mux->sources || !parent)
		return -EPERM;

	dest = mux->sources;

	while (dest->clk) {
		if (dest->clk == parent)
			break;
		dest++;
	}

	if (!dest->clk)
		return -EPERM;

	ret = mux_source_switch(mux, dest);
	if (ret)
		return ret;

	mux->c.rate = clk_get_rate(dest->clk);

	return 0;
}

static enum handoff cam_mux_clk_handoff(struct clk *c)
{
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	u32 mask = mux->enable_mask;
	u32 regval = readl_relaxed(ENABLE_REG(mux));

	c->rate = clk_get_rate(c->parent);

	if (mask == (regval & mask))
		return HANDOFF_ENABLED_CLK;

	return HANDOFF_DISABLED_CLK;
}

static struct clk *cam_mux_clk_get_parent(struct clk *c)
{
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	struct mux_source *parent = NULL;
	u32 regval = readl_relaxed(SELECT_REG(mux));

	if (!mux->sources)
		return ERR_PTR(-EPERM);

	parent = mux->sources;

	while (parent->clk) {
		if ((regval & mux->select_mask) == parent->select_val)
			return parent->clk;

		parent++;
	}

	return ERR_PTR(-EPERM);
}

static long cam_mux_clk_list_rate(struct clk *c, unsigned n)
{
	struct cam_mux_clk *mux = to_cam_mux_clk(c);
	int i;

	for (i = 0; i < n; i++)
		if (!mux->sources[i].clk)
			break;

	if (!mux->sources[i].clk)
		return -ENXIO;

	return clk_get_rate(mux->sources[i].clk);
}

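/* All-NULL operations, for clocks that need no local control. */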
struct clk_ops clk_ops_empty;

struct clk_ops clk_ops_rcg = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_clk_handoff,
	.get_parent = rcg_clk_get_parent,
};

struct clk_ops clk_ops_rcg_mnd = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_mnd_clk_handoff,
	.get_parent = rcg_mnd_clk_get_parent,
};

struct clk_ops clk_ops_pixel = {
	.enable = rcg_clk_prepare,
	.set_rate = set_rate_pixel,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = pixel_rcg_handoff,
};

struct clk_ops clk_ops_byte = {
	.enable = rcg_clk_prepare,
	.set_rate = set_rate_byte,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = byte_rcg_handoff,
};

struct clk_ops clk_ops_rcg_hdmi = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate_hdmi,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_clk_handoff,
	.get_parent = rcg_hdmi_clk_get_parent,
};

struct clk_ops clk_ops_branch = {
	.enable = branch_clk_enable,
	.disable = branch_clk_disable,
	.set_rate = branch_clk_set_rate,
	.get_rate = branch_clk_get_rate,
	.list_rate = branch_clk_list_rate,
	.round_rate = branch_clk_round_rate,
	.reset = branch_clk_reset,
	.set_flags = branch_clk_set_flags,
	.handoff = branch_clk_handoff,
};

struct clk_ops clk_ops_vote = {
	.enable = local_vote_clk_enable,
	.disable = local_vote_clk_disable,
	.reset = local_vote_clk_reset,
	.handoff = local_vote_clk_handoff,
};

struct clk_ops clk_ops_cam_mux = {
	.enable = cam_mux_clk_enable,
	.disable = cam_mux_clk_disable,
	.set_parent = cam_mux_clk_set_parent,
	.get_parent = cam_mux_clk_get_parent,
	.handoff = cam_mux_clk_handoff,
	.list_rate = cam_mux_clk_list_rate,
};