/*
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
14
15#include <linux/kernel.h>
16#include <linux/delay.h>
17#include <linux/err.h>
18#include <linux/remote_spinlock.h>
19
Vikram Mulukutla681d8682012-03-09 23:56:20 -080020#include <mach/scm-io.h>
Pankaj Kumar3912c982011-12-07 16:59:03 +053021#include <mach/msm_iomap.h>
22
23#include "clock.h"
24#include "clock-pll.h"
25#include "smd_private.h"
26
Vikram Mulukutla681d8682012-03-09 23:56:20 -080027#ifdef CONFIG_MSM_SECURE_IO
28#undef readl_relaxed
29#undef writel_relaxed
30#define readl_relaxed secure_readl
31#define writel_relaxed secure_writel
32#endif
33
/* Control bits in the PLL MODE register. */
#define PLL_OUTCTRL	BIT(0)
#define PLL_BYPASSNL	BIT(1)
#define PLL_RESET_N	BIT(2)
#define PLL_MODE_MASK	BM(3, 0)

/*
 * Register accessors: when a remappable base pointer is provided, the
 * stored register value is treated as an offset from that base;
 * otherwise it is used directly as the register address.
 */
#define PLL_EN_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->en_reg)) : \
				((x)->en_reg))
#define PLL_STATUS_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->status_reg)) : \
				((x)->status_reg))
#define PLL_MODE_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->mode_reg)) : \
				((x)->mode_reg))
#define PLL_L_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->l_reg)) : \
				((x)->l_reg))
#define PLL_M_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->m_reg)) : \
				((x)->m_reg))
#define PLL_N_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->n_reg)) : \
				((x)->n_reg))
#define PLL_CONFIG_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->config_reg)) : \
				((x)->config_reg))

/* Serializes all PLL register read-modify-write sequences. */
static DEFINE_SPINLOCK(pll_reg_lock);

/* Upper bound, in 1us steps, on polling for a PLL to enable/lock. */
#define ENABLE_WAIT_MAX_LOOPS 200
57
Matt Wagantallf82f2942012-01-27 13:56:13 -080058int pll_vote_clk_enable(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -080059{
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070060 u32 ena, count;
Vikram Mulukutla681d8682012-03-09 23:56:20 -080061 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -080062 struct pll_vote_clk *pllv = to_pll_vote_clk(c);
Vikram Mulukutla681d8682012-03-09 23:56:20 -080063
64 spin_lock_irqsave(&pll_reg_lock, flags);
Matt Wagantallf82f2942012-01-27 13:56:13 -080065 ena = readl_relaxed(PLL_EN_REG(pllv));
66 ena |= pllv->en_mask;
67 writel_relaxed(ena, PLL_EN_REG(pllv));
Vikram Mulukutla681d8682012-03-09 23:56:20 -080068 spin_unlock_irqrestore(&pll_reg_lock, flags);
69
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070070 /*
71 * Use a memory barrier since some PLL status registers are
72 * not within the same 1K segment as the voting registers.
73 */
74 mb();
Vikram Mulukutla681d8682012-03-09 23:56:20 -080075
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070076 /* Wait for pll to enable. */
77 for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
Matt Wagantallf82f2942012-01-27 13:56:13 -080078 if (readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask)
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070079 return 0;
80 udelay(1);
81 }
82
Matt Wagantallf82f2942012-01-27 13:56:13 -080083 WARN("PLL %s didn't enable after voting for it!\n", c->dbg_name);
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070084
85 return -ETIMEDOUT;
Vikram Mulukutla681d8682012-03-09 23:56:20 -080086}
87
Matt Wagantallf82f2942012-01-27 13:56:13 -080088void pll_vote_clk_disable(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -080089{
90 u32 ena;
91 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -080092 struct pll_vote_clk *pllv = to_pll_vote_clk(c);
Vikram Mulukutla681d8682012-03-09 23:56:20 -080093
94 spin_lock_irqsave(&pll_reg_lock, flags);
Matt Wagantallf82f2942012-01-27 13:56:13 -080095 ena = readl_relaxed(PLL_EN_REG(pllv));
96 ena &= ~(pllv->en_mask);
97 writel_relaxed(ena, PLL_EN_REG(pllv));
Vikram Mulukutla681d8682012-03-09 23:56:20 -080098 spin_unlock_irqrestore(&pll_reg_lock, flags);
99}
100
Matt Wagantallf82f2942012-01-27 13:56:13 -0800101struct clk *pll_vote_clk_get_parent(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800102{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800103 return to_pll_vote_clk(c)->parent;
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800104}
105
Matt Wagantallf82f2942012-01-27 13:56:13 -0800106int pll_vote_clk_is_enabled(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800107{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800108 struct pll_vote_clk *pllv = to_pll_vote_clk(c);
109 return !!(readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask);
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800110}
111
Matt Wagantallf82f2942012-01-27 13:56:13 -0800112static enum handoff pll_vote_clk_handoff(struct clk *c)
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700113{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800114 struct pll_vote_clk *pllv = to_pll_vote_clk(c);
115 if (readl_relaxed(PLL_EN_REG(pllv)) & pllv->en_mask)
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700116 return HANDOFF_ENABLED_CLK;
117
118 return HANDOFF_DISABLED_CLK;
119}
120
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800121struct clk_ops clk_ops_pll_vote = {
122 .enable = pll_vote_clk_enable,
123 .disable = pll_vote_clk_disable,
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800124 .is_enabled = pll_vote_clk_is_enabled,
125 .get_parent = pll_vote_clk_get_parent,
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700126 .handoff = pll_vote_clk_handoff,
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800127};
128
129static void __pll_clk_enable_reg(void __iomem *mode_reg)
130{
131 u32 mode = readl_relaxed(mode_reg);
132 /* Disable PLL bypass mode. */
133 mode |= PLL_BYPASSNL;
134 writel_relaxed(mode, mode_reg);
135
136 /*
137 * H/W requires a 5us delay between disabling the bypass and
138 * de-asserting the reset. Delay 10us just to be safe.
139 */
140 mb();
141 udelay(10);
142
143 /* De-assert active-low PLL reset. */
144 mode |= PLL_RESET_N;
145 writel_relaxed(mode, mode_reg);
146
147 /* Wait until PLL is locked. */
148 mb();
149 udelay(50);
150
151 /* Enable PLL output. */
152 mode |= PLL_OUTCTRL;
153 writel_relaxed(mode, mode_reg);
154
155 /* Ensure that the write above goes through before returning. */
156 mb();
157}
158
Matt Wagantallf82f2942012-01-27 13:56:13 -0800159static int local_pll_clk_enable(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800160{
161 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -0800162 struct pll_clk *pll = to_pll_clk(c);
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800163
164 spin_lock_irqsave(&pll_reg_lock, flags);
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700165 __pll_clk_enable_reg(PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800166 spin_unlock_irqrestore(&pll_reg_lock, flags);
167
168 return 0;
169}
170
171static void __pll_clk_disable_reg(void __iomem *mode_reg)
172{
173 u32 mode = readl_relaxed(mode_reg);
174 mode &= ~PLL_MODE_MASK;
175 writel_relaxed(mode, mode_reg);
176}
177
Matt Wagantallf82f2942012-01-27 13:56:13 -0800178static void local_pll_clk_disable(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800179{
180 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -0800181 struct pll_clk *pll = to_pll_clk(c);
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800182
183 /*
184 * Disable the PLL output, disable test mode, enable
185 * the bypass mode, and assert the reset.
186 */
187 spin_lock_irqsave(&pll_reg_lock, flags);
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700188 __pll_clk_disable_reg(PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800189 spin_unlock_irqrestore(&pll_reg_lock, flags);
190}
191
Matt Wagantallf82f2942012-01-27 13:56:13 -0800192static enum handoff local_pll_clk_handoff(struct clk *c)
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700193{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800194 struct pll_clk *pll = to_pll_clk(c);
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700195 u32 mode = readl_relaxed(PLL_MODE_REG(pll));
196 u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
197
198 if ((mode & mask) == mask)
199 return HANDOFF_ENABLED_CLK;
200
201 return HANDOFF_DISABLED_CLK;
202}
203
Matt Wagantallf82f2942012-01-27 13:56:13 -0800204static struct clk *local_pll_clk_get_parent(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800205{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800206 return to_pll_clk(c)->parent;
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800207}
208
Matt Wagantallf82f2942012-01-27 13:56:13 -0800209int sr_pll_clk_enable(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800210{
211 u32 mode;
212 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -0800213 struct pll_clk *pll = to_pll_clk(c);
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800214
215 spin_lock_irqsave(&pll_reg_lock, flags);
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700216 mode = readl_relaxed(PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800217 /* De-assert active-low PLL reset. */
218 mode |= PLL_RESET_N;
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700219 writel_relaxed(mode, PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800220
221 /*
222 * H/W requires a 5us delay between disabling the bypass and
223 * de-asserting the reset. Delay 10us just to be safe.
224 */
225 mb();
226 udelay(10);
227
228 /* Disable PLL bypass mode. */
229 mode |= PLL_BYPASSNL;
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700230 writel_relaxed(mode, PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800231
232 /* Wait until PLL is locked. */
233 mb();
234 udelay(60);
235
236 /* Enable PLL output. */
237 mode |= PLL_OUTCTRL;
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700238 writel_relaxed(mode, PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800239
240 /* Ensure that the write above goes through before returning. */
241 mb();
242
243 spin_unlock_irqrestore(&pll_reg_lock, flags);
244
245 return 0;
246}
247
Vikram Mulukutlaaa3e0112012-04-23 14:40:51 -0700248#define PLL_LOCKED_BIT BIT(16)
249
Vikram Mulukutla6da35d32012-07-18 13:55:31 -0700250int sr_hpm_lp_pll_clk_enable(struct clk *c)
Vikram Mulukutlaaa3e0112012-04-23 14:40:51 -0700251{
252 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -0800253 struct pll_clk *pll = to_pll_clk(c);
Vikram Mulukutlaaa3e0112012-04-23 14:40:51 -0700254 u32 count, mode;
255 int ret = 0;
256
257 spin_lock_irqsave(&pll_reg_lock, flags);
Vikram Mulukutla6da35d32012-07-18 13:55:31 -0700258
259 /* Disable PLL bypass mode and de-assert reset. */
260 mode = PLL_BYPASSNL | PLL_RESET_N;
Vikram Mulukutlaaa3e0112012-04-23 14:40:51 -0700261 writel_relaxed(mode, PLL_MODE_REG(pll));
262
Vikram Mulukutla6da35d32012-07-18 13:55:31 -0700263 /* Wait for pll to lock. */
Vikram Mulukutlaaa3e0112012-04-23 14:40:51 -0700264 for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
265 if (readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)
266 break;
267 udelay(1);
268 }
269
270 if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)) {
Matt Wagantallf82f2942012-01-27 13:56:13 -0800271 WARN("PLL %s didn't lock after enabling it!\n", c->dbg_name);
Vikram Mulukutlaaa3e0112012-04-23 14:40:51 -0700272 ret = -ETIMEDOUT;
273 goto out;
274 }
275
276 /* Enable PLL output. */
Matt Wagantall10dde282012-05-14 19:30:11 -0700277 mode |= PLL_OUTCTRL;
Vikram Mulukutlaaa3e0112012-04-23 14:40:51 -0700278 writel_relaxed(mode, PLL_MODE_REG(pll));
279
280 /* Ensure the write above goes through before returning. */
281 mb();
282
283out:
284 spin_unlock_irqrestore(&pll_reg_lock, flags);
285 return ret;
286}
287
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800288struct clk_ops clk_ops_local_pll = {
289 .enable = local_pll_clk_enable,
290 .disable = local_pll_clk_disable,
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700291 .handoff = local_pll_clk_handoff,
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800292 .get_parent = local_pll_clk_get_parent,
293};
294
/* Maps a PLL L-register value to the resulting output rate. */
struct pll_rate {
	unsigned int lvalue;
	unsigned long rate;
};

/* Known L-value/rate pairs; the all-zero entry terminates the table. */
static struct pll_rate pll_l_rate[] = {
	{10,  196000000},
	{12,  245760000},
	{30,  589820000},
	{38,  737280000},
	{41,  800000000},
	{50,  960000000},
	{52, 1008000000},
	{60, 1152000000},
	{62, 1200000000},
	{63, 1209600000},
	{73, 1401600000},
	{ 0,          0},
};
314
315#define PLL_BASE 7
316
317struct shared_pll_control {
318 uint32_t version;
319 struct {
320 /*
321 * Denotes if the PLL is ON. Technically, this can be read
322 * directly from the PLL registers, but this feild is here,
323 * so let's use it.
324 */
325 uint32_t on;
326 /*
327 * One bit for each processor core. The application processor
328 * is allocated bit position 1. All other bits should be
329 * considered as votes from other processors.
330 */
331 uint32_t votes;
332 } pll[PLL_BASE + PLL_END];
333};
334
335static remote_spinlock_t pll_lock;
336static struct shared_pll_control *pll_control;
337
338void __init msm_shared_pll_control_init(void)
339{
340#define PLL_REMOTE_SPINLOCK_ID "S:7"
341 unsigned smem_size;
342
343 remote_spin_lock_init(&pll_lock, PLL_REMOTE_SPINLOCK_ID);
344
345 pll_control = smem_get_entry(SMEM_CLKREGIM_SOURCES, &smem_size);
346 if (!pll_control) {
347 pr_err("Can't find shared PLL control data structure!\n");
348 BUG();
349 /*
350 * There might be more PLLs than what the application processor knows
351 * about. But the index used for each PLL is guaranteed to remain the
352 * same.
353 */
354 } else if (smem_size < sizeof(struct shared_pll_control)) {
355 pr_err("Shared PLL control data"
356 "structure too small!\n");
357 BUG();
358 } else if (pll_control->version != 0xCCEE0001) {
359 pr_err("Shared PLL control version mismatch!\n");
360 BUG();
361 } else {
362 pr_info("Shared PLL control available.\n");
363 return;
364 }
365
366}
367
Matt Wagantallf82f2942012-01-27 13:56:13 -0800368static int pll_clk_enable(struct clk *c)
Pankaj Kumar3912c982011-12-07 16:59:03 +0530369{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800370 struct pll_shared_clk *pll = to_pll_shared_clk(c);
Pankaj Kumar3912c982011-12-07 16:59:03 +0530371 unsigned int pll_id = pll->id;
372
373 remote_spin_lock(&pll_lock);
374
375 pll_control->pll[PLL_BASE + pll_id].votes |= BIT(1);
376 if (!pll_control->pll[PLL_BASE + pll_id].on) {
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700377 __pll_clk_enable_reg(PLL_MODE_REG(pll));
Pankaj Kumar3912c982011-12-07 16:59:03 +0530378 pll_control->pll[PLL_BASE + pll_id].on = 1;
379 }
380
381 remote_spin_unlock(&pll_lock);
382 return 0;
383}
384
Matt Wagantallf82f2942012-01-27 13:56:13 -0800385static void pll_clk_disable(struct clk *c)
Pankaj Kumar3912c982011-12-07 16:59:03 +0530386{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800387 struct pll_shared_clk *pll = to_pll_shared_clk(c);
Pankaj Kumar3912c982011-12-07 16:59:03 +0530388 unsigned int pll_id = pll->id;
389
390 remote_spin_lock(&pll_lock);
391
392 pll_control->pll[PLL_BASE + pll_id].votes &= ~BIT(1);
393 if (pll_control->pll[PLL_BASE + pll_id].on
394 && !pll_control->pll[PLL_BASE + pll_id].votes) {
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700395 __pll_clk_disable_reg(PLL_MODE_REG(pll));
Pankaj Kumar3912c982011-12-07 16:59:03 +0530396 pll_control->pll[PLL_BASE + pll_id].on = 0;
397 }
398
399 remote_spin_unlock(&pll_lock);
400}
401
/* BIT(0) of the MODE register is PLL_OUTCTRL: output currently on. */
static int pll_clk_is_enabled(struct clk *c)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(c);

	return readl_relaxed(PLL_MODE_REG(pll)) & BIT(0);
}
406
Matt Wagantallf82f2942012-01-27 13:56:13 -0800407static enum handoff pll_clk_handoff(struct clk *c)
Pankaj Kumar3912c982011-12-07 16:59:03 +0530408{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800409 struct pll_shared_clk *pll = to_pll_shared_clk(c);
Pankaj Kumar3912c982011-12-07 16:59:03 +0530410 unsigned int pll_lval;
411 struct pll_rate *l;
412
413 /*
414 * Wait for the PLLs to be initialized and then read their frequency.
415 */
416 do {
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700417 pll_lval = readl_relaxed(PLL_MODE_REG(pll) + 4) & 0x3ff;
Pankaj Kumar3912c982011-12-07 16:59:03 +0530418 cpu_relax();
419 udelay(50);
420 } while (pll_lval == 0);
421
422 /* Convert PLL L values to PLL Output rate */
423 for (l = pll_l_rate; l->rate != 0; l++) {
424 if (l->lvalue == pll_lval) {
Matt Wagantallf82f2942012-01-27 13:56:13 -0800425 c->rate = l->rate;
Pankaj Kumar3912c982011-12-07 16:59:03 +0530426 break;
427 }
428 }
429
Matt Wagantallf82f2942012-01-27 13:56:13 -0800430 if (!c->rate) {
Pankaj Kumar3912c982011-12-07 16:59:03 +0530431 pr_crit("Unknown PLL's L value!\n");
432 BUG();
433 }
434
Matt Wagantalla15833b2012-04-03 11:00:56 -0700435 return HANDOFF_ENABLED_CLK;
Pankaj Kumar3912c982011-12-07 16:59:03 +0530436}
437
Matt Wagantallae053222012-05-14 19:42:07 -0700438struct clk_ops clk_ops_pll = {
Pankaj Kumar3912c982011-12-07 16:59:03 +0530439 .enable = pll_clk_enable,
440 .disable = pll_clk_disable,
441 .handoff = pll_clk_handoff,
Pankaj Kumar3912c982011-12-07 16:59:03 +0530442 .is_enabled = pll_clk_is_enabled,
443};
Vikram Mulukutla5b146722012-04-23 18:17:50 -0700444
Vikram Mulukutla6da35d32012-07-18 13:55:31 -0700445static void __init __set_fsm_mode(void __iomem *mode_reg,
446 u32 bias_count, u32 lock_count)
Vikram Mulukutla5b146722012-04-23 18:17:50 -0700447{
448 u32 regval = readl_relaxed(mode_reg);
449
450 /* De-assert reset to FSM */
451 regval &= ~BIT(21);
452 writel_relaxed(regval, mode_reg);
453
454 /* Program bias count */
455 regval &= ~BM(19, 14);
Vikram Mulukutla6da35d32012-07-18 13:55:31 -0700456 regval |= BVAL(19, 14, bias_count);
Vikram Mulukutla5b146722012-04-23 18:17:50 -0700457 writel_relaxed(regval, mode_reg);
458
459 /* Program lock count */
460 regval &= ~BM(13, 8);
Vikram Mulukutla6da35d32012-07-18 13:55:31 -0700461 regval |= BVAL(13, 8, lock_count);
Vikram Mulukutla5b146722012-04-23 18:17:50 -0700462 writel_relaxed(regval, mode_reg);
463
464 /* Enable PLL FSM voting */
465 regval |= BIT(20);
466 writel_relaxed(regval, mode_reg);
467}
468
Vikram Mulukutla6da35d32012-07-18 13:55:31 -0700469void __init __configure_pll(struct pll_config *config,
Vikram Mulukutla5b146722012-04-23 18:17:50 -0700470 struct pll_config_regs *regs, u32 ena_fsm_mode)
471{
472 u32 regval;
473
474 writel_relaxed(config->l, PLL_L_REG(regs));
475 writel_relaxed(config->m, PLL_M_REG(regs));
476 writel_relaxed(config->n, PLL_N_REG(regs));
477
478 regval = readl_relaxed(PLL_CONFIG_REG(regs));
479
480 /* Enable the MN accumulator */
481 if (config->mn_ena_mask) {
482 regval &= ~config->mn_ena_mask;
483 regval |= config->mn_ena_val;
484 }
485
486 /* Enable the main output */
487 if (config->main_output_mask) {
488 regval &= ~config->main_output_mask;
489 regval |= config->main_output_val;
490 }
491
492 /* Set pre-divider and post-divider values */
493 regval &= ~config->pre_div_mask;
494 regval |= config->pre_div_val;
495 regval &= ~config->post_div_mask;
496 regval |= config->post_div_val;
497
498 /* Select VCO setting */
499 regval &= ~config->vco_mask;
500 regval |= config->vco_val;
501 writel_relaxed(regval, PLL_CONFIG_REG(regs));
Vikram Mulukutla5b146722012-04-23 18:17:50 -0700502}
Vikram Mulukutla6da35d32012-07-18 13:55:31 -0700503
504void __init configure_sr_pll(struct pll_config *config,
505 struct pll_config_regs *regs, u32 ena_fsm_mode)
506{
507 __configure_pll(config, regs, ena_fsm_mode);
508 if (ena_fsm_mode)
509 __set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x8);
510}
511
512void __init configure_sr_hpm_lp_pll(struct pll_config *config,
513 struct pll_config_regs *regs, u32 ena_fsm_mode)
514{
515 __configure_pll(config, regs, ena_fsm_mode);
516 if (ena_fsm_mode)
517 __set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x0);
518}
519