/*
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/remote_spinlock.h>

#include <mach/scm-io.h>
#include <mach/msm_iomap.h>

#include "clock.h"
#include "clock-pll.h"
#include "smd_private.h"

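/*
 * When register access must go through the secure monitor
 * (CONFIG_MSM_SECURE_IO), route the relaxed MMIO accessors through the
 * SCM helpers instead of touching the registers directly.
 */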
#ifdef CONFIG_MSM_SECURE_IO
#undef readl_relaxed
#undef writel_relaxed
#define readl_relaxed secure_readl
#define writel_relaxed secure_writel
#endif

#define PLL_OUTCTRL	BIT(0)
#define PLL_BYPASSNL	BIT(1)
#define PLL_RESET_N	BIT(2)
#define PLL_MODE_MASK	BM(3, 0)

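/*
 * A PLL's registers can be addressed in one of two ways: if 'base' is
 * set, the register fields hold offsets from the runtime-remapped base
 * address; otherwise the fields already hold complete addresses. These
 * macros hide that distinction from the rest of the code.
 */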
#define PLL_EN_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->en_reg)) : \
				((x)->en_reg))
#define PLL_STATUS_REG(x)	((x)->base ? (*(x)->base + (u32)((x)->status_reg)) : \
				((x)->status_reg))
#define PLL_MODE_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->mode_reg)) : \
				((x)->mode_reg))
#define PLL_L_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->l_reg)) : \
				((x)->l_reg))
#define PLL_M_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->m_reg)) : \
				((x)->m_reg))
#define PLL_N_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->n_reg)) : \
				((x)->n_reg))
#define PLL_CONFIG_REG(x)	((x)->base ? (*(x)->base + (u32)((x)->config_reg)) : \
				((x)->config_reg))

static DEFINE_SPINLOCK(pll_reg_lock);

#define ENABLE_WAIT_MAX_LOOPS 200

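/*
 * Voteable PLLs are shared with other masters: enabling one means setting
 * this processor's vote bit and then waiting for the PLL hardware to
 * report that it is running, since another master may still be bringing
 * it up.
 */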
int pll_vote_clk_enable(struct clk *c)
{
	u32 ena, count;
	unsigned long flags;
	struct pll_vote_clk *pllv = to_pll_vote_clk(c);

	spin_lock_irqsave(&pll_reg_lock, flags);
	ena = readl_relaxed(PLL_EN_REG(pllv));
	ena |= pllv->en_mask;
	writel_relaxed(ena, PLL_EN_REG(pllv));
	spin_unlock_irqrestore(&pll_reg_lock, flags);

	/*
	 * Use a memory barrier since some PLL status registers are
	 * not within the same 1K segment as the voting registers.
	 */
	mb();

	/* Wait for the PLL to enable. */
	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
		if (readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask)
			return 0;
		udelay(1);
	}

	WARN(1, "PLL %s didn't enable after voting for it!\n", c->dbg_name);

	return -ETIMEDOUT;
}

void pll_vote_clk_disable(struct clk *c)
{
	u32 ena;
	unsigned long flags;
	struct pll_vote_clk *pllv = to_pll_vote_clk(c);

	spin_lock_irqsave(&pll_reg_lock, flags);
	ena = readl_relaxed(PLL_EN_REG(pllv));
	ena &= ~(pllv->en_mask);
	writel_relaxed(ena, PLL_EN_REG(pllv));
	spin_unlock_irqrestore(&pll_reg_lock, flags);
}

struct clk *pll_vote_clk_get_parent(struct clk *c)
{
	return to_pll_vote_clk(c)->parent;
}

int pll_vote_clk_is_enabled(struct clk *c)
{
	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
	return !!(readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask);
}

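/*
 * Handoff runs once at boot: it inspects the vote register to decide
 * whether the bootloader left this PLL enabled, so the clock framework
 * can take over an already-running clock without disturbing it.
 */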
static enum handoff pll_vote_clk_handoff(struct clk *c)
{
	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
	if (readl_relaxed(PLL_EN_REG(pllv)) & pllv->en_mask)
		return HANDOFF_ENABLED_CLK;

	return HANDOFF_DISABLED_CLK;
}

struct clk_ops clk_ops_pll_vote = {
	.enable = pll_vote_clk_enable,
	.disable = pll_vote_clk_disable,
	.auto_off = pll_vote_clk_disable,
	.is_enabled = pll_vote_clk_is_enabled,
	.get_parent = pll_vote_clk_get_parent,
	.handoff = pll_vote_clk_handoff,
};

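/*
 * Standard (non-voteable) PLL bring-up sequence: take the PLL out of
 * bypass, release reset, give it time to lock, then gate the output on.
 */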
static void __pll_clk_enable_reg(void __iomem *mode_reg)
{
	u32 mode = readl_relaxed(mode_reg);
	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, mode_reg);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, mode_reg);

	/* Wait until PLL is locked. */
	mb();
	udelay(50);

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, mode_reg);

	/* Ensure that the write above goes through before returning. */
	mb();
}

static int local_pll_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(c);

	spin_lock_irqsave(&pll_reg_lock, flags);
	__pll_clk_enable_reg(PLL_MODE_REG(pll));
	spin_unlock_irqrestore(&pll_reg_lock, flags);

	return 0;
}

static void __pll_clk_disable_reg(void __iomem *mode_reg)
{
	u32 mode = readl_relaxed(mode_reg);
	mode &= ~PLL_MODE_MASK;
	writel_relaxed(mode, mode_reg);
}

static void local_pll_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(c);

	/*
	 * Disable the PLL output, disable test mode, enable
	 * the bypass mode, and assert the reset.
	 */
	spin_lock_irqsave(&pll_reg_lock, flags);
	__pll_clk_disable_reg(PLL_MODE_REG(pll));
	spin_unlock_irqrestore(&pll_reg_lock, flags);
}

static enum handoff local_pll_clk_handoff(struct clk *c)
{
	struct pll_clk *pll = to_pll_clk(c);
	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
	u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;

	if ((mode & mask) == mask)
		return HANDOFF_ENABLED_CLK;

	return HANDOFF_DISABLED_CLK;
}

static struct clk *local_pll_clk_get_parent(struct clk *c)
{
	return to_pll_clk(c)->parent;
}

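/*
 * SR PLL bring-up differs from the generic sequence above: reset is
 * released before bypass is disabled, and the lock wait is longer.
 */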
int sr_pll_clk_enable(struct clk *c)
{
	u32 mode;
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(c);

	spin_lock_irqsave(&pll_reg_lock, flags);
	mode = readl_relaxed(PLL_MODE_REG(pll));
	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/*
	 * H/W requires a 5us delay between de-asserting the reset and
	 * disabling the bypass. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Wait until PLL is locked. */
	mb();
	udelay(60);

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Ensure that the write above goes through before returning. */
	mb();

	spin_unlock_irqrestore(&pll_reg_lock, flags);

	return 0;
}

#define PLL_LOCKED_BIT BIT(16)

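/*
 * Copper-class PLLs expose a lock indication in the status register, so
 * instead of a fixed delay the enable path polls PLL_LOCKED_BIT and
 * fails with -ETIMEDOUT if the PLL never locks.
 */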
int copper_pll_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(c);
	u32 count, mode;
	int ret = 0;

	spin_lock_irqsave(&pll_reg_lock, flags);
	mode = readl_relaxed(PLL_MODE_REG(pll));
	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Wait for the PLL to lock. */
	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
		if (readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)
			break;
		udelay(1);
	}

	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)) {
		WARN(1, "PLL %s didn't lock after enabling it!\n", c->dbg_name);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Ensure the write above goes through before returning. */
	mb();

out:
	spin_unlock_irqrestore(&pll_reg_lock, flags);
	return ret;
}

struct clk_ops clk_ops_local_pll = {
	.enable = local_pll_clk_enable,
	.disable = local_pll_clk_disable,
	.auto_off = local_pll_clk_disable,
	.handoff = local_pll_clk_handoff,
	.get_parent = local_pll_clk_get_parent,
};

struct pll_rate {
	unsigned int lvalue;
	unsigned long rate;
};

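/*
 * Lookup table mapping a PLL's programmed L value to its output rate,
 * used at handoff to work out how the bootloader configured the PLL.
 * The exact rate also depends on the reference clock and M/N values, so
 * the table only lists the combinations used on these targets.
 */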
static struct pll_rate pll_l_rate[] = {
	{10, 196000000},
	{12, 245760000},
	{30, 589820000},
	{38, 737280000},
	{41, 800000000},
	{50, 960000000},
	{52, 1008000000},
	{60, 1152000000},
	{62, 1200000000},
	{63, 1209600000},
	{0, 0},
};

#define PLL_BASE	7

struct shared_pll_control {
	uint32_t	version;
	struct {
		/*
		 * Denotes if the PLL is ON. Technically, this can be read
		 * directly from the PLL registers, but this field is here,
		 * so let's use it.
		 */
		uint32_t	on;
		/*
		 * One bit for each processor core. The application processor
		 * is allocated bit position 1. All other bits should be
		 * considered as votes from other processors.
		 */
		uint32_t	votes;
	} pll[PLL_BASE + PLL_END];
};

static remote_spinlock_t pll_lock;
static struct shared_pll_control *pll_control;

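/*
 * Some PLLs are shared with remote processors; ownership is arbitrated
 * through a control structure in shared memory (SMEM), protected by a
 * remote spinlock. Validate that structure before trusting its contents.
 */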
void __init msm_shared_pll_control_init(void)
{
#define PLL_REMOTE_SPINLOCK_ID "S:7"
	unsigned smem_size;

	remote_spin_lock_init(&pll_lock, PLL_REMOTE_SPINLOCK_ID);

	pll_control = smem_get_entry(SMEM_CLKREGIM_SOURCES, &smem_size);
	if (!pll_control) {
		pr_err("Can't find shared PLL control data structure!\n");
		BUG();
	/*
	 * There might be more PLLs than what the application processor knows
	 * about. But the index used for each PLL is guaranteed to remain the
	 * same.
	 */
	} else if (smem_size < sizeof(struct shared_pll_control)) {
		pr_err("Shared PLL control data structure too small!\n");
		BUG();
	} else if (pll_control->version != 0xCCEE0001) {
		pr_err("Shared PLL control version mismatch!\n");
		BUG();
	} else {
		pr_info("Shared PLL control available.\n");
	}
}

static int pll_clk_enable(struct clk *c)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(c);
	unsigned int pll_id = pll->id;

	remote_spin_lock(&pll_lock);

	pll_control->pll[PLL_BASE + pll_id].votes |= BIT(1);
	if (!pll_control->pll[PLL_BASE + pll_id].on) {
		__pll_clk_enable_reg(PLL_MODE_REG(pll));
		pll_control->pll[PLL_BASE + pll_id].on = 1;
	}

	remote_spin_unlock(&pll_lock);
	return 0;
}

static void pll_clk_disable(struct clk *c)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(c);
	unsigned int pll_id = pll->id;

	remote_spin_lock(&pll_lock);

	pll_control->pll[PLL_BASE + pll_id].votes &= ~BIT(1);
	if (pll_control->pll[PLL_BASE + pll_id].on
	    && !pll_control->pll[PLL_BASE + pll_id].votes) {
		__pll_clk_disable_reg(PLL_MODE_REG(pll));
		pll_control->pll[PLL_BASE + pll_id].on = 0;
	}

	remote_spin_unlock(&pll_lock);
}

static int pll_clk_is_enabled(struct clk *c)
{
	return readl_relaxed(PLL_MODE_REG(to_pll_shared_clk(c))) & BIT(0);
}

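/*
 * At handoff, spin until the remote processor has programmed the PLL's
 * L value, then translate that value into an output rate via the
 * pll_l_rate table above.
 */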
static enum handoff pll_clk_handoff(struct clk *c)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(c);
	unsigned int pll_lval;
	struct pll_rate *l;

	/*
	 * Wait for the PLL to be initialized and then read its L value.
	 */
	do {
		pll_lval = readl_relaxed(PLL_MODE_REG(pll) + 4) & 0x3ff;
		cpu_relax();
		udelay(50);
	} while (pll_lval == 0);

	/* Convert the PLL's L value to its output rate. */
	for (l = pll_l_rate; l->rate != 0; l++) {
		if (l->lvalue == pll_lval) {
			c->rate = l->rate;
			break;
		}
	}

	if (!c->rate) {
		pr_crit("Unknown PLL L value!\n");
		BUG();
	}

	return HANDOFF_ENABLED_CLK;
}

struct clk_ops clk_ops_pll = {
	.enable = pll_clk_enable,
	.disable = pll_clk_disable,
	.handoff = pll_clk_handoff,
	.is_enabled = pll_clk_is_enabled,
};

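/*
 * FSM (voting) mode hands PLL on/off control to a hardware state machine
 * that aggregates enable votes from multiple masters. The bit positions
 * below (FSM reset in bit 21, vote-enable in bit 20, bias count in bits
 * 19:14, lock count in bits 13:8) follow the MODE register layout on
 * these targets.
 */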
static void __init __set_fsm_mode(void __iomem *mode_reg)
{
	u32 regval = readl_relaxed(mode_reg);

	/* De-assert reset to FSM */
	regval &= ~BIT(21);
	writel_relaxed(regval, mode_reg);

	/* Program bias count */
	regval &= ~BM(19, 14);
	regval |= BVAL(19, 14, 0x1);
	writel_relaxed(regval, mode_reg);

	/* Program lock count */
	regval &= ~BM(13, 8);
	regval |= BVAL(13, 8, 0x8);
	writel_relaxed(regval, mode_reg);

	/* Enable PLL FSM voting */
	regval |= BIT(20);
	writel_relaxed(regval, mode_reg);
}

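/*
 * One-time PLL configuration: program the L, M and N divider values
 * (nominally rate = ref * (L + M/N), though the exact relationship
 * depends on the PLL variant), then set up the accumulator, output,
 * divider and VCO fields of the CONFIG register in one read-modify-write.
 */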
void __init configure_pll(struct pll_config *config,
		struct pll_config_regs *regs, u32 ena_fsm_mode)
{
	u32 regval;

	writel_relaxed(config->l, PLL_L_REG(regs));
	writel_relaxed(config->m, PLL_M_REG(regs));
	writel_relaxed(config->n, PLL_N_REG(regs));

	regval = readl_relaxed(PLL_CONFIG_REG(regs));

	/* Enable the MN accumulator */
	if (config->mn_ena_mask) {
		regval &= ~config->mn_ena_mask;
		regval |= config->mn_ena_val;
	}

	/* Enable the main output */
	if (config->main_output_mask) {
		regval &= ~config->main_output_mask;
		regval |= config->main_output_val;
	}

	/* Set pre-divider and post-divider values */
	regval &= ~config->pre_div_mask;
	regval |= config->pre_div_val;
	regval &= ~config->post_div_mask;
	regval |= config->post_div_val;

	/* Select VCO setting */
	regval &= ~config->vco_mask;
	regval |= config->vco_val;
	writel_relaxed(regval, PLL_CONFIG_REG(regs));

	/* Configure in FSM mode if necessary */
	if (ena_fsm_mode)
		__set_fsm_mode(PLL_MODE_REG(regs));
}