/*
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
14
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/remote_spinlock.h>

#include <mach/scm-io.h>
#include <mach/msm_iomap.h>

#include "clock.h"
#include "clock-pll.h"
#include "smd_private.h"
26
/*
 * With secure IO enabled, the PLL registers live in secure address space
 * and must be accessed through the SCM read/write helpers instead of
 * plain MMIO accessors, so rebind the relaxed accessors here.
 */
#ifdef CONFIG_MSM_SECURE_IO
#undef readl_relaxed
#undef writel_relaxed
#define readl_relaxed secure_readl
#define writel_relaxed secure_writel
#endif
33
/* Bits in the PLL MODE register. */
#define PLL_OUTCTRL BIT(0)	/* Output enable. */
#define PLL_BYPASSNL BIT(1)	/* Active-low bypass: 1 = bypass disabled. */
#define PLL_RESET_N BIT(2)	/* Active-low reset: 1 = reset de-asserted. */
#define PLL_MODE_MASK BM(3, 0)

/*
 * Register address helpers: when the clock provides a base pointer, the
 * *_reg field is an offset from *base; otherwise the field already holds
 * the full (virtual) register address.
 */
#define PLL_EN_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->en_reg)) : \
		((x)->en_reg))
#define PLL_STATUS_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->status_reg)) : \
		((x)->status_reg))
#define PLL_MODE_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->mode_reg)) : \
		((x)->mode_reg))
#define PLL_L_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->l_reg)) : \
		((x)->l_reg))
#define PLL_M_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->m_reg)) : \
		((x)->m_reg))
#define PLL_N_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->n_reg)) : \
		((x)->n_reg))
#define PLL_CONFIG_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->config_reg)) : \
		((x)->config_reg))

/* Serializes all read-modify-write access to the PLL registers. */
static DEFINE_SPINLOCK(pll_reg_lock);

/* Max 1us polls to wait for a PLL to report enabled/locked. */
#define ENABLE_WAIT_MAX_LOOPS 200
57
Matt Wagantallf82f2942012-01-27 13:56:13 -080058int pll_vote_clk_enable(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -080059{
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070060 u32 ena, count;
Vikram Mulukutla681d8682012-03-09 23:56:20 -080061 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -080062 struct pll_vote_clk *pllv = to_pll_vote_clk(c);
Vikram Mulukutla681d8682012-03-09 23:56:20 -080063
64 spin_lock_irqsave(&pll_reg_lock, flags);
Matt Wagantallf82f2942012-01-27 13:56:13 -080065 ena = readl_relaxed(PLL_EN_REG(pllv));
66 ena |= pllv->en_mask;
67 writel_relaxed(ena, PLL_EN_REG(pllv));
Vikram Mulukutla681d8682012-03-09 23:56:20 -080068 spin_unlock_irqrestore(&pll_reg_lock, flags);
69
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070070 /*
71 * Use a memory barrier since some PLL status registers are
72 * not within the same 1K segment as the voting registers.
73 */
74 mb();
Vikram Mulukutla681d8682012-03-09 23:56:20 -080075
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070076 /* Wait for pll to enable. */
77 for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
Matt Wagantallf82f2942012-01-27 13:56:13 -080078 if (readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask)
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070079 return 0;
80 udelay(1);
81 }
82
Matt Wagantallf82f2942012-01-27 13:56:13 -080083 WARN("PLL %s didn't enable after voting for it!\n", c->dbg_name);
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070084
85 return -ETIMEDOUT;
Vikram Mulukutla681d8682012-03-09 23:56:20 -080086}
87
Matt Wagantallf82f2942012-01-27 13:56:13 -080088void pll_vote_clk_disable(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -080089{
90 u32 ena;
91 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -080092 struct pll_vote_clk *pllv = to_pll_vote_clk(c);
Vikram Mulukutla681d8682012-03-09 23:56:20 -080093
94 spin_lock_irqsave(&pll_reg_lock, flags);
Matt Wagantallf82f2942012-01-27 13:56:13 -080095 ena = readl_relaxed(PLL_EN_REG(pllv));
96 ena &= ~(pllv->en_mask);
97 writel_relaxed(ena, PLL_EN_REG(pllv));
Vikram Mulukutla681d8682012-03-09 23:56:20 -080098 spin_unlock_irqrestore(&pll_reg_lock, flags);
99}
100
Matt Wagantallf82f2942012-01-27 13:56:13 -0800101struct clk *pll_vote_clk_get_parent(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800102{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800103 return to_pll_vote_clk(c)->parent;
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800104}
105
Matt Wagantallf82f2942012-01-27 13:56:13 -0800106int pll_vote_clk_is_enabled(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800107{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800108 struct pll_vote_clk *pllv = to_pll_vote_clk(c);
109 return !!(readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask);
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800110}
111
Matt Wagantallf82f2942012-01-27 13:56:13 -0800112static enum handoff pll_vote_clk_handoff(struct clk *c)
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700113{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800114 struct pll_vote_clk *pllv = to_pll_vote_clk(c);
115 if (readl_relaxed(PLL_EN_REG(pllv)) & pllv->en_mask)
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700116 return HANDOFF_ENABLED_CLK;
117
118 return HANDOFF_DISABLED_CLK;
119}
120
/* Operations for PLLs shared with other masters via a voting register. */
struct clk_ops clk_ops_pll_vote = {
	.enable = pll_vote_clk_enable,
	.disable = pll_vote_clk_disable,
	.is_enabled = pll_vote_clk_is_enabled,
	.get_parent = pll_vote_clk_get_parent,
	.handoff = pll_vote_clk_handoff,
};
128
/*
 * Run the standard PLL enable sequence on a MODE register: leave bypass,
 * release reset, wait for lock, then gate the output on.  Caller must
 * hold whatever lock serializes access to this register.  The delays
 * below are hardware requirements; do not reorder or shorten them.
 */
static void __pll_clk_enable_reg(void __iomem *mode_reg)
{
	u32 mode = readl_relaxed(mode_reg);
	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, mode_reg);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, mode_reg);

	/* Wait until PLL is locked. */
	mb();
	udelay(50);

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, mode_reg);

	/* Ensure that the write above goes through before returning. */
	mb();
}
158
Matt Wagantallf82f2942012-01-27 13:56:13 -0800159static int local_pll_clk_enable(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800160{
161 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -0800162 struct pll_clk *pll = to_pll_clk(c);
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800163
164 spin_lock_irqsave(&pll_reg_lock, flags);
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700165 __pll_clk_enable_reg(PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800166 spin_unlock_irqrestore(&pll_reg_lock, flags);
167
168 return 0;
169}
170
171static void __pll_clk_disable_reg(void __iomem *mode_reg)
172{
173 u32 mode = readl_relaxed(mode_reg);
174 mode &= ~PLL_MODE_MASK;
175 writel_relaxed(mode, mode_reg);
176}
177
Matt Wagantallf82f2942012-01-27 13:56:13 -0800178static void local_pll_clk_disable(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800179{
180 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -0800181 struct pll_clk *pll = to_pll_clk(c);
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800182
183 /*
184 * Disable the PLL output, disable test mode, enable
185 * the bypass mode, and assert the reset.
186 */
187 spin_lock_irqsave(&pll_reg_lock, flags);
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700188 __pll_clk_disable_reg(PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800189 spin_unlock_irqrestore(&pll_reg_lock, flags);
190}
191
Matt Wagantallf82f2942012-01-27 13:56:13 -0800192static enum handoff local_pll_clk_handoff(struct clk *c)
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700193{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800194 struct pll_clk *pll = to_pll_clk(c);
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700195 u32 mode = readl_relaxed(PLL_MODE_REG(pll));
196 u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
197
198 if ((mode & mask) == mask)
199 return HANDOFF_ENABLED_CLK;
200
201 return HANDOFF_DISABLED_CLK;
202}
203
Matt Wagantallf82f2942012-01-27 13:56:13 -0800204static struct clk *local_pll_clk_get_parent(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800205{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800206 return to_pll_clk(c)->parent;
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800207}
208
Matt Wagantallf82f2942012-01-27 13:56:13 -0800209int sr_pll_clk_enable(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800210{
211 u32 mode;
212 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -0800213 struct pll_clk *pll = to_pll_clk(c);
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800214
215 spin_lock_irqsave(&pll_reg_lock, flags);
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700216 mode = readl_relaxed(PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800217 /* De-assert active-low PLL reset. */
218 mode |= PLL_RESET_N;
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700219 writel_relaxed(mode, PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800220
221 /*
222 * H/W requires a 5us delay between disabling the bypass and
223 * de-asserting the reset. Delay 10us just to be safe.
224 */
225 mb();
226 udelay(10);
227
228 /* Disable PLL bypass mode. */
229 mode |= PLL_BYPASSNL;
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700230 writel_relaxed(mode, PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800231
232 /* Wait until PLL is locked. */
233 mb();
234 udelay(60);
235
236 /* Enable PLL output. */
237 mode |= PLL_OUTCTRL;
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700238 writel_relaxed(mode, PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800239
240 /* Ensure that the write above goes through before returning. */
241 mb();
242
243 spin_unlock_irqrestore(&pll_reg_lock, flags);
244
245 return 0;
246}
247
/* Lock indicator in the 8974 PLL status register. */
#define PLL_LOCKED_BIT BIT(16)

/*
 * Enable an 8974-style PLL: leave bypass, release reset, poll the status
 * register for the lock bit, then enable the output.  Returns 0 on
 * success or -ETIMEDOUT if the PLL never reported lock (in which case
 * the output is left disabled).
 */
int msm8974_pll_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(c);
	u32 count, mode;
	int ret = 0;

	spin_lock_irqsave(&pll_reg_lock, flags);
	mode = readl_relaxed(PLL_MODE_REG(pll));
	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Wait for pll to enable. */
	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
		if (readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)
			break;
		udelay(1);
	}

	/* Re-check after the loop so a lock on the final poll is honored. */
	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)) {
		WARN("PLL %s didn't lock after enabling it!\n", c->dbg_name);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Ensure the write above goes through before returning. */
	mb();

out:
	spin_unlock_irqrestore(&pll_reg_lock, flags);
	return ret;
}
298
/* Operations for PLLs wholly owned and managed by this processor. */
struct clk_ops clk_ops_local_pll = {
	.enable = local_pll_clk_enable,
	.disable = local_pll_clk_disable,
	.handoff = local_pll_clk_handoff,
	.get_parent = local_pll_clk_get_parent,
};
305
/* Maps a PLL L-register value to the output rate it produces. */
struct pll_rate {
	unsigned int lvalue;	/* Value read from the L register. */
	unsigned long rate;	/* Resulting PLL output rate, in Hz. */
};

/* Known L-value -> rate pairs; terminated by a zero-rate sentinel. */
static struct pll_rate pll_l_rate[] = {
	{10, 196000000},
	{12, 245760000},
	{30, 589820000},
	{38, 737280000},
	{41, 800000000},
	{50, 960000000},
	{52, 1008000000},
	{60, 1152000000},
	{62, 1200000000},
	{63, 1209600000},
	{0, 0},
};

/* Index of the first shared PLL within the SMEM control structure. */
#define PLL_BASE 7
326
/* Layout of the PLL control structure shared with other processors via SMEM. */
struct shared_pll_control {
	uint32_t version;	/* Must read 0xCCEE0001; checked at init. */
	struct {
		/*
		 * Denotes if the PLL is ON. Technically, this can be read
		 * directly from the PLL registers, but this field is here,
		 * so let's use it.
		 */
		uint32_t on;
		/*
		 * One bit for each processor core. The application processor
		 * is allocated bit position 1. All other bits should be
		 * considered as votes from other processors.
		 */
		uint32_t votes;
	} pll[PLL_BASE + PLL_END];
};

/* Cross-processor lock and SMEM pointer guarding the shared PLL state. */
static remote_spinlock_t pll_lock;
static struct shared_pll_control *pll_control;
347
348void __init msm_shared_pll_control_init(void)
349{
350#define PLL_REMOTE_SPINLOCK_ID "S:7"
351 unsigned smem_size;
352
353 remote_spin_lock_init(&pll_lock, PLL_REMOTE_SPINLOCK_ID);
354
355 pll_control = smem_get_entry(SMEM_CLKREGIM_SOURCES, &smem_size);
356 if (!pll_control) {
357 pr_err("Can't find shared PLL control data structure!\n");
358 BUG();
359 /*
360 * There might be more PLLs than what the application processor knows
361 * about. But the index used for each PLL is guaranteed to remain the
362 * same.
363 */
364 } else if (smem_size < sizeof(struct shared_pll_control)) {
365 pr_err("Shared PLL control data"
366 "structure too small!\n");
367 BUG();
368 } else if (pll_control->version != 0xCCEE0001) {
369 pr_err("Shared PLL control version mismatch!\n");
370 BUG();
371 } else {
372 pr_info("Shared PLL control available.\n");
373 return;
374 }
375
376}
377
Matt Wagantallf82f2942012-01-27 13:56:13 -0800378static int pll_clk_enable(struct clk *c)
Pankaj Kumar3912c982011-12-07 16:59:03 +0530379{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800380 struct pll_shared_clk *pll = to_pll_shared_clk(c);
Pankaj Kumar3912c982011-12-07 16:59:03 +0530381 unsigned int pll_id = pll->id;
382
383 remote_spin_lock(&pll_lock);
384
385 pll_control->pll[PLL_BASE + pll_id].votes |= BIT(1);
386 if (!pll_control->pll[PLL_BASE + pll_id].on) {
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700387 __pll_clk_enable_reg(PLL_MODE_REG(pll));
Pankaj Kumar3912c982011-12-07 16:59:03 +0530388 pll_control->pll[PLL_BASE + pll_id].on = 1;
389 }
390
391 remote_spin_unlock(&pll_lock);
392 return 0;
393}
394
Matt Wagantallf82f2942012-01-27 13:56:13 -0800395static void pll_clk_disable(struct clk *c)
Pankaj Kumar3912c982011-12-07 16:59:03 +0530396{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800397 struct pll_shared_clk *pll = to_pll_shared_clk(c);
Pankaj Kumar3912c982011-12-07 16:59:03 +0530398 unsigned int pll_id = pll->id;
399
400 remote_spin_lock(&pll_lock);
401
402 pll_control->pll[PLL_BASE + pll_id].votes &= ~BIT(1);
403 if (pll_control->pll[PLL_BASE + pll_id].on
404 && !pll_control->pll[PLL_BASE + pll_id].votes) {
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700405 __pll_clk_disable_reg(PLL_MODE_REG(pll));
Pankaj Kumar3912c982011-12-07 16:59:03 +0530406 pll_control->pll[PLL_BASE + pll_id].on = 0;
407 }
408
409 remote_spin_unlock(&pll_lock);
410}
411
Matt Wagantallf82f2942012-01-27 13:56:13 -0800412static int pll_clk_is_enabled(struct clk *c)
Pankaj Kumar3912c982011-12-07 16:59:03 +0530413{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800414 return readl_relaxed(PLL_MODE_REG(to_pll_shared_clk(c))) & BIT(0);
Pankaj Kumar3912c982011-12-07 16:59:03 +0530415}
416
Matt Wagantallf82f2942012-01-27 13:56:13 -0800417static enum handoff pll_clk_handoff(struct clk *c)
Pankaj Kumar3912c982011-12-07 16:59:03 +0530418{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800419 struct pll_shared_clk *pll = to_pll_shared_clk(c);
Pankaj Kumar3912c982011-12-07 16:59:03 +0530420 unsigned int pll_lval;
421 struct pll_rate *l;
422
423 /*
424 * Wait for the PLLs to be initialized and then read their frequency.
425 */
426 do {
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700427 pll_lval = readl_relaxed(PLL_MODE_REG(pll) + 4) & 0x3ff;
Pankaj Kumar3912c982011-12-07 16:59:03 +0530428 cpu_relax();
429 udelay(50);
430 } while (pll_lval == 0);
431
432 /* Convert PLL L values to PLL Output rate */
433 for (l = pll_l_rate; l->rate != 0; l++) {
434 if (l->lvalue == pll_lval) {
Matt Wagantallf82f2942012-01-27 13:56:13 -0800435 c->rate = l->rate;
Pankaj Kumar3912c982011-12-07 16:59:03 +0530436 break;
437 }
438 }
439
Matt Wagantallf82f2942012-01-27 13:56:13 -0800440 if (!c->rate) {
Pankaj Kumar3912c982011-12-07 16:59:03 +0530441 pr_crit("Unknown PLL's L value!\n");
442 BUG();
443 }
444
Matt Wagantalla15833b2012-04-03 11:00:56 -0700445 return HANDOFF_ENABLED_CLK;
Pankaj Kumar3912c982011-12-07 16:59:03 +0530446}
447
/* Operations for PLLs shared with other processors through SMEM voting. */
struct clk_ops clk_ops_pll = {
	.enable = pll_clk_enable,
	.disable = pll_clk_disable,
	.handoff = pll_clk_handoff,
	.is_enabled = pll_clk_is_enabled,
};
Vikram Mulukutla5b146722012-04-23 18:17:50 -0700454
/*
 * Put a PLL into FSM (voting) mode: release the FSM reset, program the
 * bias and lock counters, then enable FSM voting.  Each field is written
 * separately in the order the hardware expects.
 */
static void __init __set_fsm_mode(void __iomem *mode_reg)
{
	u32 regval = readl_relaxed(mode_reg);

	/* De-assert reset to FSM */
	regval &= ~BIT(21);
	writel_relaxed(regval, mode_reg);

	/* Program bias count */
	regval &= ~BM(19, 14);
	regval |= BVAL(19, 14, 0x1);
	writel_relaxed(regval, mode_reg);

	/* Program lock count */
	regval &= ~BM(13, 8);
	regval |= BVAL(13, 8, 0x8);
	writel_relaxed(regval, mode_reg);

	/* Enable PLL FSM voting */
	regval |= BIT(20);
	writel_relaxed(regval, mode_reg);
}
477
/*
 * One-time PLL configuration at boot: program the L/M/N divider values,
 * then update the CONFIG register fields (MN accumulator, main output,
 * pre/post dividers, VCO range) in a single read-modify-write, and
 * optionally switch the PLL into FSM voting mode.
 *
 * @config: divider/field values and the masks selecting which CONFIG
 *          fields to touch
 * @regs: register addresses/offsets for this PLL
 * @ena_fsm_mode: nonzero to place the PLL in FSM (voting) mode
 */
void __init configure_pll(struct pll_config *config,
		struct pll_config_regs *regs, u32 ena_fsm_mode)
{
	u32 regval;

	writel_relaxed(config->l, PLL_L_REG(regs));
	writel_relaxed(config->m, PLL_M_REG(regs));
	writel_relaxed(config->n, PLL_N_REG(regs));

	regval = readl_relaxed(PLL_CONFIG_REG(regs));

	/* Enable the MN accumulator */
	if (config->mn_ena_mask) {
		regval &= ~config->mn_ena_mask;
		regval |= config->mn_ena_val;
	}

	/* Enable the main output */
	if (config->main_output_mask) {
		regval &= ~config->main_output_mask;
		regval |= config->main_output_val;
	}

	/* Set pre-divider and post-divider values */
	regval &= ~config->pre_div_mask;
	regval |= config->pre_div_val;
	regval &= ~config->post_div_mask;
	regval |= config->post_div_val;

	/* Select VCO setting */
	regval &= ~config->vco_mask;
	regval |= config->vco_val;
	writel_relaxed(regval, PLL_CONFIG_REG(regs));

	/* Configure in FSM mode if necessary */
	if (ena_fsm_mode)
		__set_fsm_mode(PLL_MODE_REG(regs));
}