blob: c82058b904ac41a2958dad954944060d47f3bf9d [file] [log] [blame]
Pankaj Kumar3912c982011-12-07 16:59:03 +05301/*
Duy Truong790f06d2013-02-13 16:38:12 -08002 * Copyright (c) 2012, The Linux Foundation. All rights reserved.
Pankaj Kumar3912c982011-12-07 16:59:03 +05303 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#include <linux/kernel.h>
16#include <linux/delay.h>
17#include <linux/err.h>
18#include <linux/remote_spinlock.h>
19
Vikram Mulukutla681d8682012-03-09 23:56:20 -080020#include <mach/scm-io.h>
Pankaj Kumar3912c982011-12-07 16:59:03 +053021#include <mach/msm_iomap.h>
22
23#include "clock.h"
24#include "clock-pll.h"
25#include "smd_private.h"
26
/*
 * With CONFIG_MSM_SECURE_IO, PLL registers are accessed through the
 * secure (SCM) IO helpers from <mach/scm-io.h>; transparently reroute
 * the relaxed accessors used throughout this file.
 */
#ifdef CONFIG_MSM_SECURE_IO
#undef readl_relaxed
#undef writel_relaxed
#define readl_relaxed secure_readl
#define writel_relaxed secure_writel
#endif

/* Bits in the PLL MODE register. */
#define PLL_OUTCTRL BIT(0)
#define PLL_BYPASSNL BIT(1)
#define PLL_RESET_N BIT(2)
#define PLL_MODE_MASK BM(3, 0)

/*
 * Register address helpers: when the clock supplies a mapped base
 * pointer, the *_reg field is an offset from *base; otherwise the
 * field already holds the full register address.
 */
#define PLL_EN_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->en_reg)) : \
			((x)->en_reg))
#define PLL_STATUS_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->status_reg)) : \
			((x)->status_reg))
#define PLL_MODE_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->mode_reg)) : \
			((x)->mode_reg))
#define PLL_L_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->l_reg)) : \
			((x)->l_reg))
#define PLL_M_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->m_reg)) : \
			((x)->m_reg))
#define PLL_N_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->n_reg)) : \
			((x)->n_reg))
#define PLL_CONFIG_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->config_reg)) : \
			((x)->config_reg))

/* Serializes all local PLL register read-modify-write sequences. */
static DEFINE_SPINLOCK(pll_reg_lock);

/* Max iterations (1us apart) to poll for PLL enable/lock. */
#define ENABLE_WAIT_MAX_LOOPS 200
/* Lock-detect bit in the PLL status register. */
#define PLL_LOCKED_BIT BIT(16)
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070058
/*
 * Vote for a HW-voteable PLL by setting this master's bit in the
 * enable/voting register, then poll the status register until the
 * hardware reports the PLL active.
 *
 * Returns 0 on success, -ETIMEDOUT if the status bit never asserted
 * within ENABLE_WAIT_MAX_LOOPS polls.
 */
static int pll_vote_clk_enable(struct clk *c)
{
	u32 ena, count;
	unsigned long flags;
	struct pll_vote_clk *pllv = to_pll_vote_clk(c);

	/* Read-modify-write of the shared voting register under the lock. */
	spin_lock_irqsave(&pll_reg_lock, flags);
	ena = readl_relaxed(PLL_EN_REG(pllv));
	ena |= pllv->en_mask;
	writel_relaxed(ena, PLL_EN_REG(pllv));
	spin_unlock_irqrestore(&pll_reg_lock, flags);

	/*
	 * Use a memory barrier since some PLL status registers are
	 * not within the same 1K segment as the voting registers.
	 */
	mb();

	/* Wait for pll to enable. */
	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
		if (readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask)
			return 0;
		udelay(1);
	}

	WARN("PLL %s didn't enable after voting for it!\n", c->dbg_name);

	return -ETIMEDOUT;
}
88
Tianyi Goub0f74a92012-10-11 14:10:08 -070089static void pll_vote_clk_disable(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -080090{
91 u32 ena;
92 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -080093 struct pll_vote_clk *pllv = to_pll_vote_clk(c);
Vikram Mulukutla681d8682012-03-09 23:56:20 -080094
95 spin_lock_irqsave(&pll_reg_lock, flags);
Matt Wagantallf82f2942012-01-27 13:56:13 -080096 ena = readl_relaxed(PLL_EN_REG(pllv));
97 ena &= ~(pllv->en_mask);
98 writel_relaxed(ena, PLL_EN_REG(pllv));
Vikram Mulukutla681d8682012-03-09 23:56:20 -080099 spin_unlock_irqrestore(&pll_reg_lock, flags);
100}
101
Tianyi Goub0f74a92012-10-11 14:10:08 -0700102static int pll_vote_clk_is_enabled(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800103{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800104 struct pll_vote_clk *pllv = to_pll_vote_clk(c);
105 return !!(readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask);
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800106}
107
Matt Wagantallf82f2942012-01-27 13:56:13 -0800108static enum handoff pll_vote_clk_handoff(struct clk *c)
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700109{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800110 struct pll_vote_clk *pllv = to_pll_vote_clk(c);
111 if (readl_relaxed(PLL_EN_REG(pllv)) & pllv->en_mask)
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700112 return HANDOFF_ENABLED_CLK;
113
114 return HANDOFF_DISABLED_CLK;
115}
116
/* Clock operations for PLLs controlled via the HW voting registers. */
struct clk_ops clk_ops_pll_vote = {
	.enable = pll_vote_clk_enable,
	.disable = pll_vote_clk_disable,
	.is_enabled = pll_vote_clk_is_enabled,
	.handoff = pll_vote_clk_handoff,
};
123
/*
 * Program the PLL CONFIG register for frequency entry @f using the
 * per-PLL bit masks in @masks: MN-counter enable, pre/post dividers,
 * VCO selection, and main output enable.
 *
 * Note: the MN enable bit is only ever set here (when f->m_val != 0),
 * never cleared.
 */
static void __pll_config_reg(void __iomem *pll_config, struct pll_freq_tbl *f,
				struct pll_config_masks *masks)
{
	u32 regval;

	regval = readl_relaxed(pll_config);

	/* Enable the MN counter if used */
	if (f->m_val)
		regval |= masks->mn_en_mask;

	/* Set pre-divider and post-divider values */
	regval &= ~masks->pre_div_mask;
	regval |= f->pre_div_val;
	regval &= ~masks->post_div_mask;
	regval |= f->post_div_val;

	/* Select VCO setting */
	regval &= ~masks->vco_mask;
	regval |= f->vco_val;

	/* Enable main output if it has not been enabled */
	if (masks->main_output_mask && !(regval & masks->main_output_mask))
		regval |= masks->main_output_mask;

	writel_relaxed(regval, pll_config);
}
151
Patrick Daly79323142012-12-05 15:06:42 -0800152static int sr2_pll_clk_enable(struct clk *c)
153{
154 unsigned long flags;
155 struct pll_clk *pll = to_pll_clk(c);
156 int ret = 0, count;
157 u32 mode = readl_relaxed(PLL_MODE_REG(pll));
158
159 spin_lock_irqsave(&pll_reg_lock, flags);
160
161 /* Disable PLL bypass mode. */
162 mode |= PLL_BYPASSNL;
163 writel_relaxed(mode, PLL_MODE_REG(pll));
164
165 /*
166 * H/W requires a 5us delay between disabling the bypass and
167 * de-asserting the reset. Delay 10us just to be safe.
168 */
169 mb();
170 udelay(10);
171
172 /* De-assert active-low PLL reset. */
173 mode |= PLL_RESET_N;
174 writel_relaxed(mode, PLL_MODE_REG(pll));
175
176 /* Wait for pll to lock. */
177 for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
178 if (readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)
179 break;
180 udelay(1);
181 }
182
183 if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT))
184 pr_err("PLL %s didn't lock after enabling it!\n", c->dbg_name);
185
186 /* Enable PLL output. */
187 mode |= PLL_OUTCTRL;
188 writel_relaxed(mode, PLL_MODE_REG(pll));
189
190 /* Ensure that the write above goes through before returning. */
191 mb();
192
193 spin_unlock_irqrestore(&pll_reg_lock, flags);
194 return ret;
195}
196
/*
 * Low-level PLL enable sequence on a mode register: bypass off,
 * reset released, fixed 50us settle, output on. Caller must hold the
 * appropriate lock; this helper does no locking itself.
 */
static void __pll_clk_enable_reg(void __iomem *mode_reg)
{
	u32 mode = readl_relaxed(mode_reg);
	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, mode_reg);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, mode_reg);

	/* Wait until PLL is locked. */
	mb();
	udelay(50);

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, mode_reg);

	/* Ensure that the write above goes through before returning. */
	mb();
}
226
Matt Wagantallf82f2942012-01-27 13:56:13 -0800227static int local_pll_clk_enable(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800228{
229 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -0800230 struct pll_clk *pll = to_pll_clk(c);
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800231
232 spin_lock_irqsave(&pll_reg_lock, flags);
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700233 __pll_clk_enable_reg(PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800234 spin_unlock_irqrestore(&pll_reg_lock, flags);
235
236 return 0;
237}
238
239static void __pll_clk_disable_reg(void __iomem *mode_reg)
240{
241 u32 mode = readl_relaxed(mode_reg);
242 mode &= ~PLL_MODE_MASK;
243 writel_relaxed(mode, mode_reg);
244}
245
Matt Wagantallf82f2942012-01-27 13:56:13 -0800246static void local_pll_clk_disable(struct clk *c)
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800247{
248 unsigned long flags;
Matt Wagantallf82f2942012-01-27 13:56:13 -0800249 struct pll_clk *pll = to_pll_clk(c);
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800250
251 /*
252 * Disable the PLL output, disable test mode, enable
253 * the bypass mode, and assert the reset.
254 */
255 spin_lock_irqsave(&pll_reg_lock, flags);
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700256 __pll_clk_disable_reg(PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800257 spin_unlock_irqrestore(&pll_reg_lock, flags);
258}
259
Matt Wagantallf82f2942012-01-27 13:56:13 -0800260static enum handoff local_pll_clk_handoff(struct clk *c)
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700261{
Matt Wagantallf82f2942012-01-27 13:56:13 -0800262 struct pll_clk *pll = to_pll_clk(c);
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700263 u32 mode = readl_relaxed(PLL_MODE_REG(pll));
264 u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
265
266 if ((mode & mask) == mask)
267 return HANDOFF_ENABLED_CLK;
268
269 return HANDOFF_DISABLED_CLK;
270}
271
/*
 * Set a local PLL to @rate by looking it up in the PLL's frequency
 * table and programming the L/M/N and CONFIG registers.
 *
 * Returns -EBUSY if the PLL is currently enabled (rate changes are
 * only allowed while it is off), -EINVAL if @rate is not in the
 * table, 0 on success.
 */
static int local_pll_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct pll_freq_tbl *nf;
	struct pll_clk *pll = to_pll_clk(c);
	u32 mode;

	mode = readl_relaxed(PLL_MODE_REG(pll));

	/* Don't change PLL's rate if it is enabled */
	if ((mode & PLL_MODE_MASK) == PLL_MODE_MASK)
		return -EBUSY;

	/* Linear search of the PLL_FREQ_END-terminated frequency table. */
	for (nf = pll->freq_tbl; nf->freq_hz != PLL_FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == PLL_FREQ_END)
		return -EINVAL;

	writel_relaxed(nf->l_val, PLL_L_REG(pll));
	writel_relaxed(nf->m_val, PLL_M_REG(pll));
	writel_relaxed(nf->n_val, PLL_N_REG(pll));

	__pll_config_reg(PLL_CONFIG_REG(pll), nf, &pll->masks);

	return 0;
}
299
/*
 * Enable an SR PLL. Unlike __pll_clk_enable_reg(), this sequence
 * de-asserts reset first and then disables bypass, with a fixed 60us
 * lock wait. Always returns 0.
 */
int sr_pll_clk_enable(struct clk *c)
{
	u32 mode;
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(c);

	spin_lock_irqsave(&pll_reg_lock, flags);
	mode = readl_relaxed(PLL_MODE_REG(pll));
	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/*
	 * H/W requires a 5us delay between the two mode updates
	 * (here: reset release, then bypass disable). Delay 10us
	 * just to be safe.
	 */
	mb();
	udelay(10);

	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Wait until PLL is locked. */
	mb();
	udelay(60);

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Ensure that the write above goes through before returning. */
	mb();

	spin_unlock_irqrestore(&pll_reg_lock, flags);

	return 0;
}
338
/*
 * Enable an SR HPM/LP PLL: write bypass-off and reset-released in one
 * shot, poll the status register for the lock bit, then enable the
 * output.
 *
 * Returns 0 on success, -ETIMEDOUT (with the PLL left mid-sequence)
 * if the lock bit never asserted.
 */
int sr_hpm_lp_pll_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(c);
	u32 count, mode;
	int ret = 0;

	spin_lock_irqsave(&pll_reg_lock, flags);

	/* Disable PLL bypass mode and de-assert reset. */
	mode = PLL_BYPASSNL | PLL_RESET_N;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Wait for pll to lock. */
	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
		if (readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)
			break;
		udelay(1);
	}

	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)) {
		WARN("PLL %s didn't lock after enabling it!\n", c->dbg_name);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Ensure the write above goes through before returning. */
	mb();

out:
	spin_unlock_irqrestore(&pll_reg_lock, flags);
	return ret;
}
376
/* Clock operations for locally-controlled (non-voted) PLLs. */
struct clk_ops clk_ops_local_pll = {
	.enable = local_pll_clk_enable,
	.disable = local_pll_clk_disable,
	.set_rate = local_pll_clk_set_rate,
	.handoff = local_pll_clk_handoff,
};
383
/* SR2 PLLs share local-PLL ops but use the lock-bit enable sequence. */
struct clk_ops clk_ops_sr2_pll = {
	.enable = sr2_pll_clk_enable,
	.disable = local_pll_clk_disable,
	.set_rate = local_pll_clk_set_rate,
	.handoff = local_pll_clk_handoff,
};
390
/* Maps a PLL L-register value to the resulting output rate. */
struct pll_rate {
	unsigned int lvalue;	/* value read from the PLL L register */
	unsigned long rate;	/* corresponding output rate, in Hz */
};

/* Known L-value -> rate pairs; terminated by the {0, 0} sentinel. */
static struct pll_rate pll_l_rate[] = {
	{10, 196000000},
	{12, 245760000},
	{30, 589820000},
	{38, 737280000},
	{41, 800000000},
	{50, 960000000},
	{52, 1008000000},
	{57, 1104000000},
	{60, 1152000000},
	{62, 1200000000},
	{63, 1209600000},
	{73, 1401600000},
	{0, 0},
};
411
/* Index offset of the first shared PLL within the control structure. */
#define PLL_BASE 7

/* Layout of the PLL control structure shared with other processors. */
struct shared_pll_control {
	uint32_t version;
	struct {
		/*
		 * Denotes if the PLL is ON. Technically, this can be read
		 * directly from the PLL registers, but this field is here,
		 * so let's use it.
		 */
		uint32_t on;
		/*
		 * One bit for each processor core. The application processor
		 * is allocated bit position 1. All other bits should be
		 * considered as votes from other processors.
		 */
		uint32_t votes;
	} pll[PLL_BASE + PLL_END];
};

/* Cross-processor lock and SMEM-resident control table for shared PLLs. */
static remote_spinlock_t pll_lock;
static struct shared_pll_control *pll_control;
434
435void __init msm_shared_pll_control_init(void)
436{
437#define PLL_REMOTE_SPINLOCK_ID "S:7"
438 unsigned smem_size;
439
440 remote_spin_lock_init(&pll_lock, PLL_REMOTE_SPINLOCK_ID);
441
442 pll_control = smem_get_entry(SMEM_CLKREGIM_SOURCES, &smem_size);
443 if (!pll_control) {
444 pr_err("Can't find shared PLL control data structure!\n");
445 BUG();
446 /*
447 * There might be more PLLs than what the application processor knows
448 * about. But the index used for each PLL is guaranteed to remain the
449 * same.
450 */
451 } else if (smem_size < sizeof(struct shared_pll_control)) {
452 pr_err("Shared PLL control data"
453 "structure too small!\n");
454 BUG();
455 } else if (pll_control->version != 0xCCEE0001) {
456 pr_err("Shared PLL control version mismatch!\n");
457 BUG();
458 } else {
459 pr_info("Shared PLL control available.\n");
460 return;
461 }
462
463}
464
/*
 * Enable a processor-shared PLL: record the application processor's
 * vote (bit 1) in the shared SMEM table under the remote spinlock,
 * and physically enable the PLL only if no one had it on yet.
 * Always returns 0.
 */
static int pll_clk_enable(struct clk *c)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(c);
	unsigned int pll_id = pll->id;

	remote_spin_lock(&pll_lock);

	pll_control->pll[PLL_BASE + pll_id].votes |= BIT(1);
	if (!pll_control->pll[PLL_BASE + pll_id].on) {
		__pll_clk_enable_reg(PLL_MODE_REG(pll));
		pll_control->pll[PLL_BASE + pll_id].on = 1;
	}

	remote_spin_unlock(&pll_lock);
	return 0;
}
481
/*
 * Disable a processor-shared PLL: clear the application processor's
 * vote (bit 1) and physically disable the PLL only when it is on and
 * no other processor still holds a vote.
 */
static void pll_clk_disable(struct clk *c)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(c);
	unsigned int pll_id = pll->id;

	remote_spin_lock(&pll_lock);

	pll_control->pll[PLL_BASE + pll_id].votes &= ~BIT(1);
	if (pll_control->pll[PLL_BASE + pll_id].on
	    && !pll_control->pll[PLL_BASE + pll_id].votes) {
		__pll_clk_disable_reg(PLL_MODE_REG(pll));
		pll_control->pll[PLL_BASE + pll_id].on = 0;
	}

	remote_spin_unlock(&pll_lock);
}
498
/* The PLL is enabled iff bit 0 (PLL_OUTCTRL) of its mode register is set. */
static int pll_clk_is_enabled(struct clk *c)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(c);

	return readl_relaxed(PLL_MODE_REG(pll)) & BIT(0);
}
503
/*
 * Boot-time handoff for a shared PLL: busy-wait until the L register
 * (mode register + 4) reads non-zero, translate the L value to a rate
 * via pll_l_rate[], and record it on the clock. An unrecognized L
 * value is fatal (BUG). Always reports the clock as enabled.
 */
static enum handoff pll_clk_handoff(struct clk *c)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(c);
	unsigned int pll_lval;
	struct pll_rate *l;

	/*
	 * Wait for the PLLs to be initialized and then read their frequency.
	 */
	do {
		/* L value lives in the low 10 bits of MODE + 4. */
		pll_lval = readl_relaxed(PLL_MODE_REG(pll) + 4) & 0x3ff;
		cpu_relax();
		udelay(50);
	} while (pll_lval == 0);

	/* Convert PLL L values to PLL Output rate */
	for (l = pll_l_rate; l->rate != 0; l++) {
		if (l->lvalue == pll_lval) {
			c->rate = l->rate;
			break;
		}
	}

	if (!c->rate) {
		pr_crit("Unknown PLL's L value!\n");
		BUG();
	}

	return HANDOFF_ENABLED_CLK;
}
534
/* Clock operations for PLLs shared across processors via SMEM votes. */
struct clk_ops clk_ops_pll = {
	.enable = pll_clk_enable,
	.disable = pll_clk_disable,
	.handoff = pll_clk_handoff,
	.is_enabled = pll_clk_is_enabled,
};
Vikram Mulukutla5b146722012-04-23 18:17:50 -0700541
/* Serializes updates to the software (per-client) vote words below. */
static DEFINE_SPINLOCK(soft_vote_lock);

/*
 * Software-voted enable: cast the HW vote only when this is the first
 * software voter, then record our soft-vote bit on success.
 *
 * Returns 0 on success or the error from pll_vote_clk_enable().
 */
static int pll_acpu_vote_clk_enable(struct clk *c)
{
	int ret = 0;
	unsigned long flags;
	struct pll_vote_clk *pllv = to_pll_vote_clk(c);

	spin_lock_irqsave(&soft_vote_lock, flags);

	/* Only the first soft voter touches the hardware. */
	if (!*pllv->soft_vote)
		ret = pll_vote_clk_enable(c);
	if (ret == 0)
		*pllv->soft_vote |= (pllv->soft_vote_mask);

	spin_unlock_irqrestore(&soft_vote_lock, flags);
	return ret;
}
560
561static void pll_acpu_vote_clk_disable(struct clk *c)
562{
563 unsigned long flags;
564 struct pll_vote_clk *pllv = to_pll_vote_clk(c);
565
566 spin_lock_irqsave(&soft_vote_lock, flags);
567
568 *pllv->soft_vote &= ~(pllv->soft_vote_mask);
569 if (!*pllv->soft_vote)
570 pll_vote_clk_disable(c);
571
572 spin_unlock_irqrestore(&soft_vote_lock, flags);
573}
574
/* Clock operations for HW-voted PLLs with an extra software vote layer. */
struct clk_ops clk_ops_pll_acpu_vote = {
	.enable = pll_acpu_vote_clk_enable,
	.disable = pll_acpu_vote_clk_disable,
	.is_enabled = pll_vote_clk_is_enabled,
};
580
/*
 * Put a PLL under HW FSM (voting) control: release the FSM reset,
 * program the bias and lock counters into the mode register, then set
 * the FSM-voting enable bit. Each step is written back separately.
 */
static void __init __set_fsm_mode(void __iomem *mode_reg,
					u32 bias_count, u32 lock_count)
{
	u32 regval = readl_relaxed(mode_reg);

	/* De-assert reset to FSM */
	regval &= ~BIT(21);
	writel_relaxed(regval, mode_reg);

	/* Program bias count */
	regval &= ~BM(19, 14);
	regval |= BVAL(19, 14, bias_count);
	writel_relaxed(regval, mode_reg);

	/* Program lock count */
	regval &= ~BM(13, 8);
	regval |= BVAL(13, 8, lock_count);
	writel_relaxed(regval, mode_reg);

	/* Enable PLL FSM voting */
	regval |= BIT(20);
	writel_relaxed(regval, mode_reg);
}
604
/*
 * One-time PLL configuration: program L/M/N values and the CONFIG
 * register fields (MN accumulator, main output, dividers, VCO) from
 * @config. @ena_fsm_mode is unused here; callers act on it after this
 * returns (see configure_sr_pll / configure_sr_hpm_lp_pll).
 */
void __init __configure_pll(struct pll_config *config,
		struct pll_config_regs *regs, u32 ena_fsm_mode)
{
	u32 regval;

	writel_relaxed(config->l, PLL_L_REG(regs));
	writel_relaxed(config->m, PLL_M_REG(regs));
	writel_relaxed(config->n, PLL_N_REG(regs));

	regval = readl_relaxed(PLL_CONFIG_REG(regs));

	/* Enable the MN accumulator */
	if (config->mn_ena_mask) {
		regval &= ~config->mn_ena_mask;
		regval |= config->mn_ena_val;
	}

	/* Enable the main output */
	if (config->main_output_mask) {
		regval &= ~config->main_output_mask;
		regval |= config->main_output_val;
	}

	/* Set pre-divider and post-divider values */
	regval &= ~config->pre_div_mask;
	regval |= config->pre_div_val;
	regval &= ~config->post_div_mask;
	regval |= config->post_div_val;

	/* Select VCO setting */
	regval &= ~config->vco_mask;
	regval |= config->vco_val;
	writel_relaxed(regval, PLL_CONFIG_REG(regs));
}
Vikram Mulukutla6da35d32012-07-18 13:55:31 -0700639
640void __init configure_sr_pll(struct pll_config *config,
641 struct pll_config_regs *regs, u32 ena_fsm_mode)
642{
643 __configure_pll(config, regs, ena_fsm_mode);
644 if (ena_fsm_mode)
645 __set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x8);
646}
647
648void __init configure_sr_hpm_lp_pll(struct pll_config *config,
649 struct pll_config_regs *regs, u32 ena_fsm_mode)
650{
651 __configure_pll(config, regs, ena_fsm_mode);
652 if (ena_fsm_mode)
653 __set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x0);
654}
655