blob: d5831e23d635cb69c76ee5af625646a21d41a350 [file] [log] [blame]
Pankaj Kumar3912c982011-12-07 16:59:03 +05301/*
2 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#include <linux/kernel.h>
16#include <linux/delay.h>
17#include <linux/err.h>
18#include <linux/remote_spinlock.h>
19
Vikram Mulukutla681d8682012-03-09 23:56:20 -080020#include <mach/scm-io.h>
Pankaj Kumar3912c982011-12-07 16:59:03 +053021#include <mach/msm_iomap.h>
22
23#include "clock.h"
24#include "clock-pll.h"
25#include "smd_private.h"
26
Vikram Mulukutla681d8682012-03-09 23:56:20 -080027#ifdef CONFIG_MSM_SECURE_IO
28#undef readl_relaxed
29#undef writel_relaxed
30#define readl_relaxed secure_readl
31#define writel_relaxed secure_writel
32#endif
33
/* Control bits in the PLL MODE register. */
#define PLL_OUTCTRL BIT(0)
#define PLL_BYPASSNL BIT(1)
#define PLL_RESET_N BIT(2)
#define PLL_MODE_MASK BM(3, 0)

/*
 * Register address helpers: if the clock provides a mapping base, the
 * stored *_reg value is treated as an offset from *base; otherwise it
 * is used directly as the register address.
 */
#define PLL_EN_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->en_reg)) : \
			((x)->en_reg))
#define PLL_STATUS_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->status_reg)) : \
			((x)->status_reg))
#define PLL_MODE_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->mode_reg)) : \
			((x)->mode_reg))
#define PLL_L_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->l_reg)) : \
			((x)->l_reg))
#define PLL_M_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->m_reg)) : \
			((x)->m_reg))
#define PLL_N_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->n_reg)) : \
			((x)->n_reg))
#define PLL_CONFIG_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->config_reg)) : \
			((x)->config_reg))
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -070053
Vikram Mulukutla681d8682012-03-09 23:56:20 -080054static DEFINE_SPINLOCK(pll_reg_lock);
55
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070056#define ENABLE_WAIT_MAX_LOOPS 200
57
Matt Wagantallf82f2942012-01-27 13:56:13 -080058int pll_vote_clk_enable(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -080059{
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070060 u32 ena, count;
Vikram Mulukutla681d8682012-03-09 23:56:20 -080061 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -080062 struct pll_vote_clk *pllv = to_pll_vote_clk(c);
Vikram Mulukutla681d8682012-03-09 23:56:20 -080063
64 spin_lock_irqsave(&pll_reg_lock, flags);
Matt Wagantallf82f2942012-01-27 13:56:13 -080065 ena = readl_relaxed(PLL_EN_REG(pllv));
66 ena |= pllv->en_mask;
67 writel_relaxed(ena, PLL_EN_REG(pllv));
Vikram Mulukutla681d8682012-03-09 23:56:20 -080068 spin_unlock_irqrestore(&pll_reg_lock, flags);
69
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070070 /*
71 * Use a memory barrier since some PLL status registers are
72 * not within the same 1K segment as the voting registers.
73 */
74 mb();
Vikram Mulukutla681d8682012-03-09 23:56:20 -080075
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070076 /* Wait for pll to enable. */
77 for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
Matt Wagantallf82f2942012-01-27 13:56:13 -080078 if (readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask)
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070079 return 0;
80 udelay(1);
81 }
82
Matt Wagantallf82f2942012-01-27 13:56:13 -080083 WARN("PLL %s didn't enable after voting for it!\n", c->dbg_name);
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070084
85 return -ETIMEDOUT;
Vikram Mulukutla681d8682012-03-09 23:56:20 -080086}
87
Matt Wagantallf82f2942012-01-27 13:56:13 -080088void pll_vote_clk_disable(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -080089{
90 u32 ena;
91 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -080092 struct pll_vote_clk *pllv = to_pll_vote_clk(c);
Vikram Mulukutla681d8682012-03-09 23:56:20 -080093
94 spin_lock_irqsave(&pll_reg_lock, flags);
Matt Wagantallf82f2942012-01-27 13:56:13 -080095 ena = readl_relaxed(PLL_EN_REG(pllv));
96 ena &= ~(pllv->en_mask);
97 writel_relaxed(ena, PLL_EN_REG(pllv));
Vikram Mulukutla681d8682012-03-09 23:56:20 -080098 spin_unlock_irqrestore(&pll_reg_lock, flags);
99}
100
Matt Wagantallf82f2942012-01-27 13:56:13 -0800101struct clk *pll_vote_clk_get_parent(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800102{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800103 return to_pll_vote_clk(c)->parent;
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800104}
105
Matt Wagantallf82f2942012-01-27 13:56:13 -0800106int pll_vote_clk_is_enabled(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800107{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800108 struct pll_vote_clk *pllv = to_pll_vote_clk(c);
109 return !!(readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask);
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800110}
111
Matt Wagantallf82f2942012-01-27 13:56:13 -0800112static enum handoff pll_vote_clk_handoff(struct clk *c)
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700113{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800114 struct pll_vote_clk *pllv = to_pll_vote_clk(c);
115 if (readl_relaxed(PLL_EN_REG(pllv)) & pllv->en_mask)
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700116 return HANDOFF_ENABLED_CLK;
117
118 return HANDOFF_DISABLED_CLK;
119}
120
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800121struct clk_ops clk_ops_pll_vote = {
122 .enable = pll_vote_clk_enable,
123 .disable = pll_vote_clk_disable,
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800124 .is_enabled = pll_vote_clk_is_enabled,
125 .get_parent = pll_vote_clk_get_parent,
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700126 .handoff = pll_vote_clk_handoff,
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800127};
128
/*
 * Bring a PLL up via its MODE register using the required hardware
 * sequence: leave bypass, release reset, wait for lock, then gate the
 * output on.  Caller must hold the appropriate lock; the delays below
 * are mandated by the hardware and must not be shortened or reordered.
 */
static void __pll_clk_enable_reg(void __iomem *mode_reg)
{
	u32 mode = readl_relaxed(mode_reg);
	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, mode_reg);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, mode_reg);

	/* Wait until PLL is locked. */
	mb();
	udelay(50);

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, mode_reg);

	/* Ensure that the write above goes through before returning. */
	mb();
}
158
Matt Wagantallf82f2942012-01-27 13:56:13 -0800159static int local_pll_clk_enable(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800160{
161 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -0800162 struct pll_clk *pll = to_pll_clk(c);
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800163
164 spin_lock_irqsave(&pll_reg_lock, flags);
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700165 __pll_clk_enable_reg(PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800166 spin_unlock_irqrestore(&pll_reg_lock, flags);
167
168 return 0;
169}
170
171static void __pll_clk_disable_reg(void __iomem *mode_reg)
172{
173 u32 mode = readl_relaxed(mode_reg);
174 mode &= ~PLL_MODE_MASK;
175 writel_relaxed(mode, mode_reg);
176}
177
Matt Wagantallf82f2942012-01-27 13:56:13 -0800178static void local_pll_clk_disable(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800179{
180 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -0800181 struct pll_clk *pll = to_pll_clk(c);
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800182
183 /*
184 * Disable the PLL output, disable test mode, enable
185 * the bypass mode, and assert the reset.
186 */
187 spin_lock_irqsave(&pll_reg_lock, flags);
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700188 __pll_clk_disable_reg(PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800189 spin_unlock_irqrestore(&pll_reg_lock, flags);
190}
191
Matt Wagantallf82f2942012-01-27 13:56:13 -0800192static enum handoff local_pll_clk_handoff(struct clk *c)
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700193{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800194 struct pll_clk *pll = to_pll_clk(c);
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700195 u32 mode = readl_relaxed(PLL_MODE_REG(pll));
196 u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
197
198 if ((mode & mask) == mask)
199 return HANDOFF_ENABLED_CLK;
200
201 return HANDOFF_DISABLED_CLK;
202}
203
Matt Wagantallf82f2942012-01-27 13:56:13 -0800204static struct clk *local_pll_clk_get_parent(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800205{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800206 return to_pll_clk(c)->parent;
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800207}
208
Matt Wagantallf82f2942012-01-27 13:56:13 -0800209int sr_pll_clk_enable(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800210{
211 u32 mode;
212 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -0800213 struct pll_clk *pll = to_pll_clk(c);
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800214
215 spin_lock_irqsave(&pll_reg_lock, flags);
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700216 mode = readl_relaxed(PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800217 /* De-assert active-low PLL reset. */
218 mode |= PLL_RESET_N;
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700219 writel_relaxed(mode, PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800220
221 /*
222 * H/W requires a 5us delay between disabling the bypass and
223 * de-asserting the reset. Delay 10us just to be safe.
224 */
225 mb();
226 udelay(10);
227
228 /* Disable PLL bypass mode. */
229 mode |= PLL_BYPASSNL;
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700230 writel_relaxed(mode, PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800231
232 /* Wait until PLL is locked. */
233 mb();
234 udelay(60);
235
236 /* Enable PLL output. */
237 mode |= PLL_OUTCTRL;
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700238 writel_relaxed(mode, PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800239
240 /* Ensure that the write above goes through before returning. */
241 mb();
242
243 spin_unlock_irqrestore(&pll_reg_lock, flags);
244
245 return 0;
246}
247
Vikram Mulukutlaaa3e0112012-04-23 14:40:51 -0700248#define PLL_LOCKED_BIT BIT(16)
249
Abhimanyu Kapur90ced6e2012-06-26 17:41:25 -0700250int msm8974_pll_clk_enable(struct clk *c)
Vikram Mulukutlaaa3e0112012-04-23 14:40:51 -0700251{
252 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -0800253 struct pll_clk *pll = to_pll_clk(c);
Vikram Mulukutlaaa3e0112012-04-23 14:40:51 -0700254 u32 count, mode;
255 int ret = 0;
256
257 spin_lock_irqsave(&pll_reg_lock, flags);
258 mode = readl_relaxed(PLL_MODE_REG(pll));
259 /* Disable PLL bypass mode. */
Matt Wagantall10dde282012-05-14 19:30:11 -0700260 mode |= PLL_BYPASSNL;
Vikram Mulukutlaaa3e0112012-04-23 14:40:51 -0700261 writel_relaxed(mode, PLL_MODE_REG(pll));
262
263 /*
264 * H/W requires a 5us delay between disabling the bypass and
265 * de-asserting the reset. Delay 10us just to be safe.
266 */
267 mb();
268 udelay(10);
269
270 /* De-assert active-low PLL reset. */
Matt Wagantall10dde282012-05-14 19:30:11 -0700271 mode |= PLL_RESET_N;
Vikram Mulukutlaaa3e0112012-04-23 14:40:51 -0700272 writel_relaxed(mode, PLL_MODE_REG(pll));
273
274 /* Wait for pll to enable. */
275 for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
276 if (readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)
277 break;
278 udelay(1);
279 }
280
281 if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)) {
Matt Wagantallf82f2942012-01-27 13:56:13 -0800282 WARN("PLL %s didn't lock after enabling it!\n", c->dbg_name);
Vikram Mulukutlaaa3e0112012-04-23 14:40:51 -0700283 ret = -ETIMEDOUT;
284 goto out;
285 }
286
287 /* Enable PLL output. */
Matt Wagantall10dde282012-05-14 19:30:11 -0700288 mode |= PLL_OUTCTRL;
Vikram Mulukutlaaa3e0112012-04-23 14:40:51 -0700289 writel_relaxed(mode, PLL_MODE_REG(pll));
290
291 /* Ensure the write above goes through before returning. */
292 mb();
293
294out:
295 spin_unlock_irqrestore(&pll_reg_lock, flags);
296 return ret;
297}
298
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800299struct clk_ops clk_ops_local_pll = {
300 .enable = local_pll_clk_enable,
301 .disable = local_pll_clk_disable,
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700302 .handoff = local_pll_clk_handoff,
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800303 .get_parent = local_pll_clk_get_parent,
304};
305
Pankaj Kumar3912c982011-12-07 16:59:03 +0530306struct pll_rate {
307 unsigned int lvalue;
308 unsigned long rate;
309};
310
311static struct pll_rate pll_l_rate[] = {
312 {10, 196000000},
313 {12, 245760000},
314 {30, 589820000},
315 {38, 737280000},
316 {41, 800000000},
317 {50, 960000000},
318 {52, 1008000000},
Trilok Soni48631722012-05-17 20:56:42 +0530319 {60, 1152000000},
Pankaj Kumar3912c982011-12-07 16:59:03 +0530320 {62, 1200000000},
Pankaj Kumar50c705c2012-01-10 12:02:07 +0530321 {63, 1209600000},
Kaushal Kumar86473f02012-06-28 19:35:58 +0530322 {73, 1401600000},
Pankaj Kumar3912c982011-12-07 16:59:03 +0530323 {0, 0},
324};
325
/* Index offset of the first shared PLL within the SMEM control array. */
#define PLL_BASE 7

/*
 * Layout of the PLL control structure shared with other processors via
 * SMEM.  All processors vote here before touching a shared PLL.
 */
struct shared_pll_control {
	uint32_t version;	/* Must match the expected SMEM layout version. */
	struct {
		/*
		 * Denotes if the PLL is ON. Technically, this can be read
		 * directly from the PLL registers, but this field is here,
		 * so let's use it.
		 */
		uint32_t on;
		/*
		 * One bit for each processor core. The application processor
		 * is allocated bit position 1. All other bits should be
		 * considered as votes from other processors.
		 */
		uint32_t votes;
	} pll[PLL_BASE + PLL_END];
};

/* Inter-processor lock protecting the shared control structure. */
static remote_spinlock_t pll_lock;
/* Points into SMEM once msm_shared_pll_control_init() has run. */
static struct shared_pll_control *pll_control;
348
349void __init msm_shared_pll_control_init(void)
350{
351#define PLL_REMOTE_SPINLOCK_ID "S:7"
352 unsigned smem_size;
353
354 remote_spin_lock_init(&pll_lock, PLL_REMOTE_SPINLOCK_ID);
355
356 pll_control = smem_get_entry(SMEM_CLKREGIM_SOURCES, &smem_size);
357 if (!pll_control) {
358 pr_err("Can't find shared PLL control data structure!\n");
359 BUG();
360 /*
361 * There might be more PLLs than what the application processor knows
362 * about. But the index used for each PLL is guaranteed to remain the
363 * same.
364 */
365 } else if (smem_size < sizeof(struct shared_pll_control)) {
366 pr_err("Shared PLL control data"
367 "structure too small!\n");
368 BUG();
369 } else if (pll_control->version != 0xCCEE0001) {
370 pr_err("Shared PLL control version mismatch!\n");
371 BUG();
372 } else {
373 pr_info("Shared PLL control available.\n");
374 return;
375 }
376
377}
378
Matt Wagantallf82f2942012-01-27 13:56:13 -0800379static int pll_clk_enable(struct clk *c)
Pankaj Kumar3912c982011-12-07 16:59:03 +0530380{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800381 struct pll_shared_clk *pll = to_pll_shared_clk(c);
Pankaj Kumar3912c982011-12-07 16:59:03 +0530382 unsigned int pll_id = pll->id;
383
384 remote_spin_lock(&pll_lock);
385
386 pll_control->pll[PLL_BASE + pll_id].votes |= BIT(1);
387 if (!pll_control->pll[PLL_BASE + pll_id].on) {
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700388 __pll_clk_enable_reg(PLL_MODE_REG(pll));
Pankaj Kumar3912c982011-12-07 16:59:03 +0530389 pll_control->pll[PLL_BASE + pll_id].on = 1;
390 }
391
392 remote_spin_unlock(&pll_lock);
393 return 0;
394}
395
Matt Wagantallf82f2942012-01-27 13:56:13 -0800396static void pll_clk_disable(struct clk *c)
Pankaj Kumar3912c982011-12-07 16:59:03 +0530397{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800398 struct pll_shared_clk *pll = to_pll_shared_clk(c);
Pankaj Kumar3912c982011-12-07 16:59:03 +0530399 unsigned int pll_id = pll->id;
400
401 remote_spin_lock(&pll_lock);
402
403 pll_control->pll[PLL_BASE + pll_id].votes &= ~BIT(1);
404 if (pll_control->pll[PLL_BASE + pll_id].on
405 && !pll_control->pll[PLL_BASE + pll_id].votes) {
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700406 __pll_clk_disable_reg(PLL_MODE_REG(pll));
Pankaj Kumar3912c982011-12-07 16:59:03 +0530407 pll_control->pll[PLL_BASE + pll_id].on = 0;
408 }
409
410 remote_spin_unlock(&pll_lock);
411}
412
Matt Wagantallf82f2942012-01-27 13:56:13 -0800413static int pll_clk_is_enabled(struct clk *c)
Pankaj Kumar3912c982011-12-07 16:59:03 +0530414{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800415 return readl_relaxed(PLL_MODE_REG(to_pll_shared_clk(c))) & BIT(0);
Pankaj Kumar3912c982011-12-07 16:59:03 +0530416}
417
Matt Wagantallf82f2942012-01-27 13:56:13 -0800418static enum handoff pll_clk_handoff(struct clk *c)
Pankaj Kumar3912c982011-12-07 16:59:03 +0530419{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800420 struct pll_shared_clk *pll = to_pll_shared_clk(c);
Pankaj Kumar3912c982011-12-07 16:59:03 +0530421 unsigned int pll_lval;
422 struct pll_rate *l;
423
424 /*
425 * Wait for the PLLs to be initialized and then read their frequency.
426 */
427 do {
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700428 pll_lval = readl_relaxed(PLL_MODE_REG(pll) + 4) & 0x3ff;
Pankaj Kumar3912c982011-12-07 16:59:03 +0530429 cpu_relax();
430 udelay(50);
431 } while (pll_lval == 0);
432
433 /* Convert PLL L values to PLL Output rate */
434 for (l = pll_l_rate; l->rate != 0; l++) {
435 if (l->lvalue == pll_lval) {
Matt Wagantallf82f2942012-01-27 13:56:13 -0800436 c->rate = l->rate;
Pankaj Kumar3912c982011-12-07 16:59:03 +0530437 break;
438 }
439 }
440
Matt Wagantallf82f2942012-01-27 13:56:13 -0800441 if (!c->rate) {
Pankaj Kumar3912c982011-12-07 16:59:03 +0530442 pr_crit("Unknown PLL's L value!\n");
443 BUG();
444 }
445
Matt Wagantalla15833b2012-04-03 11:00:56 -0700446 return HANDOFF_ENABLED_CLK;
Pankaj Kumar3912c982011-12-07 16:59:03 +0530447}
448
Matt Wagantallae053222012-05-14 19:42:07 -0700449struct clk_ops clk_ops_pll = {
Pankaj Kumar3912c982011-12-07 16:59:03 +0530450 .enable = pll_clk_enable,
451 .disable = pll_clk_disable,
452 .handoff = pll_clk_handoff,
Pankaj Kumar3912c982011-12-07 16:59:03 +0530453 .is_enabled = pll_clk_is_enabled,
454};
Vikram Mulukutla5b146722012-04-23 18:17:50 -0700455
/*
 * Put a PLL under hardware FSM (voting) control.  Each field is written
 * back to the MODE register as it is updated; the FSM voting enable is
 * set last, after the bias and lock counts are programmed.
 */
static void __init __set_fsm_mode(void __iomem *mode_reg)
{
	u32 regval = readl_relaxed(mode_reg);

	/* De-assert reset to FSM */
	regval &= ~BIT(21);
	writel_relaxed(regval, mode_reg);

	/* Program bias count */
	regval &= ~BM(19, 14);
	regval |= BVAL(19, 14, 0x1);
	writel_relaxed(regval, mode_reg);

	/* Program lock count */
	regval &= ~BM(13, 8);
	regval |= BVAL(13, 8, 0x8);
	writel_relaxed(regval, mode_reg);

	/* Enable PLL FSM voting */
	regval |= BIT(20);
	writel_relaxed(regval, mode_reg);
}
478
/*
 * One-time boot configuration of a PLL: program the L/M/N divider
 * registers, then build up the CONFIG register from the mask/value
 * pairs in @config, and optionally hand the PLL to the voting FSM.
 *
 * @config:       divider values plus mask/value pairs for CONFIG fields
 * @regs:         register addresses/offsets for this PLL
 * @ena_fsm_mode: non-zero to enable hardware FSM voting afterwards
 */
void __init configure_pll(struct pll_config *config,
		struct pll_config_regs *regs, u32 ena_fsm_mode)
{
	u32 regval;

	writel_relaxed(config->l, PLL_L_REG(regs));
	writel_relaxed(config->m, PLL_M_REG(regs));
	writel_relaxed(config->n, PLL_N_REG(regs));

	regval = readl_relaxed(PLL_CONFIG_REG(regs));

	/* Enable the MN accumulator */
	if (config->mn_ena_mask) {
		regval &= ~config->mn_ena_mask;
		regval |= config->mn_ena_val;
	}

	/* Enable the main output */
	if (config->main_output_mask) {
		regval &= ~config->main_output_mask;
		regval |= config->main_output_val;
	}

	/* Set pre-divider and post-divider values */
	regval &= ~config->pre_div_mask;
	regval |= config->pre_div_val;
	regval &= ~config->post_div_mask;
	regval |= config->post_div_val;

	/* Select VCO setting */
	regval &= ~config->vco_mask;
	regval |= config->vco_val;
	writel_relaxed(regval, PLL_CONFIG_REG(regs));

	/* Configure in FSM mode if necessary */
	if (ena_fsm_mode)
		__set_fsm_mode(PLL_MODE_REG(regs));
}