/*
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/remote_spinlock.h>

#include <mach/scm-io.h>
#include <mach/msm_iomap.h>
#include <mach/msm_smem.h>

#include "clock.h"
#include "clock-pll.h"

#ifdef CONFIG_MSM_SECURE_IO
#undef readl_relaxed
#undef writel_relaxed
#define readl_relaxed secure_readl
#define writel_relaxed secure_writel
#endif

#define PLL_OUTCTRL BIT(0)
#define PLL_BYPASSNL BIT(1)
#define PLL_RESET_N BIT(2)
#define PLL_MODE_MASK BM(3, 0)

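/*
 * A PLL's register fields hold either complete virtual addresses (when
 * base is NULL) or offsets that are applied to a runtime-mapped *base.
 * These accessors hide that distinction from the rest of the driver.
 */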
#define PLL_EN_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->en_reg)) : \
				((x)->en_reg))
#define PLL_STATUS_REG(x)	((x)->base ? (*(x)->base + (u32)((x)->status_reg)) : \
				((x)->status_reg))
#define PLL_MODE_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->mode_reg)) : \
				((x)->mode_reg))
#define PLL_L_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->l_reg)) : \
				((x)->l_reg))
#define PLL_M_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->m_reg)) : \
				((x)->m_reg))
#define PLL_N_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->n_reg)) : \
				((x)->n_reg))
#define PLL_CONFIG_REG(x)	((x)->base ? (*(x)->base + (u32)((x)->config_reg)) : \
				((x)->config_reg))

static DEFINE_SPINLOCK(pll_reg_lock);

#define ENABLE_WAIT_MAX_LOOPS 200
#define PLL_LOCKED_BIT BIT(16)

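/*
 * Voted PLLs are shared across masters: enable() only sets this
 * processor's vote bit and then polls the status register until the
 * hardware reports that the PLL is running.
 */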
static int pll_vote_clk_enable(struct clk *c)
{
	u32 ena, count;
	unsigned long flags;
	struct pll_vote_clk *pllv = to_pll_vote_clk(c);

	spin_lock_irqsave(&pll_reg_lock, flags);
	ena = readl_relaxed(PLL_EN_REG(pllv));
	ena |= pllv->en_mask;
	writel_relaxed(ena, PLL_EN_REG(pllv));
	spin_unlock_irqrestore(&pll_reg_lock, flags);

	/*
	 * Use a memory barrier since some PLL status registers are
	 * not within the same 1K segment as the voting registers.
	 */
	mb();

	/* Wait for pll to enable. */
	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
		if (readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask)
			return 0;
		udelay(1);
	}

	WARN(1, "PLL %s didn't enable after voting for it!\n", c->dbg_name);

	return -ETIMEDOUT;
}

static void pll_vote_clk_disable(struct clk *c)
{
	u32 ena;
	unsigned long flags;
	struct pll_vote_clk *pllv = to_pll_vote_clk(c);

	spin_lock_irqsave(&pll_reg_lock, flags);
	ena = readl_relaxed(PLL_EN_REG(pllv));
	ena &= ~(pllv->en_mask);
	writel_relaxed(ena, PLL_EN_REG(pllv));
	spin_unlock_irqrestore(&pll_reg_lock, flags);
}

static int pll_vote_clk_is_enabled(struct clk *c)
{
	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
	return !!(readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask);
}

static enum handoff pll_vote_clk_handoff(struct clk *c)
{
	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
	if (readl_relaxed(PLL_EN_REG(pllv)) & pllv->en_mask)
		return HANDOFF_ENABLED_CLK;

	return HANDOFF_DISABLED_CLK;
}

struct clk_ops clk_ops_pll_vote = {
	.enable = pll_vote_clk_enable,
	.disable = pll_vote_clk_disable,
	.is_enabled = pll_vote_clk_is_enabled,
	.handoff = pll_vote_clk_handoff,
};
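
/*
 * A minimal sketch of how a board file might describe a voted PLL.
 * Field names follow this driver's usage (en_reg/en_mask/status_reg/
 * status_mask plus an embedded struct clk, assumed here to be named
 * "c"); the register addresses and the "pll8" name are hypothetical:
 *
 *	static struct pll_vote_clk pll8_clk = {
 *		.en_reg = (void __iomem *)0x34C0,
 *		.en_mask = BIT(8),
 *		.status_reg = (void __iomem *)0x3158,
 *		.status_mask = PLL_LOCKED_BIT,
 *		.c = {
 *			.dbg_name = "pll8_clk",
 *			.ops = &clk_ops_pll_vote,
 *		},
 *	};
 */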

static void __pll_config_reg(void __iomem *pll_config, struct pll_freq_tbl *f,
			struct pll_config_masks *masks)
{
	u32 regval;

	regval = readl_relaxed(pll_config);

	/* Enable the MN counter if used */
	if (f->m_val)
		regval |= masks->mn_en_mask;

	/* Set pre-divider and post-divider values */
	regval &= ~masks->pre_div_mask;
	regval |= f->pre_div_val;
	regval &= ~masks->post_div_mask;
	regval |= f->post_div_val;

	/* Select VCO setting */
	regval &= ~masks->vco_mask;
	regval |= f->vco_val;

	/* Enable main output if it has not been enabled */
	if (masks->main_output_mask && !(regval & masks->main_output_mask))
		regval |= masks->main_output_mask;

	writel_relaxed(regval, pll_config);
}

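/*
 * SR2 PLLs report lock status in their status register, so the enable
 * sequence below polls PLL_LOCKED_BIT rather than waiting a fixed delay.
 */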
static int sr2_pll_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(c);
	int ret = 0, count;
	u32 mode = readl_relaxed(PLL_MODE_REG(pll));

	spin_lock_irqsave(&pll_reg_lock, flags);

	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Wait for pll to lock. */
	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
		if (readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)
			break;
		udelay(1);
	}

	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT))
		pr_err("PLL %s didn't lock after enabling it!\n", c->dbg_name);

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Ensure that the write above goes through before returning. */
	mb();

	spin_unlock_irqrestore(&pll_reg_lock, flags);
	return ret;
}

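/*
 * Common raw enable sequence for PLLs that do not expose a lock bit:
 * release bypass, release reset, then wait a fixed 50us for the lock.
 */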
static void __pll_clk_enable_reg(void __iomem *mode_reg)
{
	u32 mode = readl_relaxed(mode_reg);
	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, mode_reg);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, mode_reg);

	/* Wait until PLL is locked. */
	mb();
	udelay(50);

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, mode_reg);

	/* Ensure that the write above goes through before returning. */
	mb();
}

static int local_pll_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(c);

	spin_lock_irqsave(&pll_reg_lock, flags);
	__pll_clk_enable_reg(PLL_MODE_REG(pll));
	spin_unlock_irqrestore(&pll_reg_lock, flags);

	return 0;
}

static void __pll_clk_disable_reg(void __iomem *mode_reg)
{
	u32 mode = readl_relaxed(mode_reg);
	mode &= ~PLL_MODE_MASK;
	writel_relaxed(mode, mode_reg);
}

static void local_pll_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(c);

	/*
	 * Disable the PLL output, disable test mode, enable
	 * the bypass mode, and assert the reset.
	 */
	spin_lock_irqsave(&pll_reg_lock, flags);
	__pll_clk_disable_reg(PLL_MODE_REG(pll));
	spin_unlock_irqrestore(&pll_reg_lock, flags);
}

static enum handoff local_pll_clk_handoff(struct clk *c)
{
	struct pll_clk *pll = to_pll_clk(c);
	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
	u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;

	if ((mode & mask) == mask)
		return HANDOFF_ENABLED_CLK;

	return HANDOFF_DISABLED_CLK;
}

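/*
 * Rates may only be changed while the PLL is disabled; the new L/M/N
 * and config values are looked up in the PLL's PLL_FREQ_END-terminated
 * frequency table.
 */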
static int local_pll_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct pll_freq_tbl *nf;
	struct pll_clk *pll = to_pll_clk(c);
	u32 mode;

	mode = readl_relaxed(PLL_MODE_REG(pll));

	/* Don't change the PLL's rate if it is enabled */
	if ((mode & PLL_MODE_MASK) == PLL_MODE_MASK)
		return -EBUSY;

	for (nf = pll->freq_tbl; nf->freq_hz != PLL_FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == PLL_FREQ_END)
		return -EINVAL;

	writel_relaxed(nf->l_val, PLL_L_REG(pll));
	writel_relaxed(nf->m_val, PLL_M_REG(pll));
	writel_relaxed(nf->n_val, PLL_N_REG(pll));

	__pll_config_reg(PLL_CONFIG_REG(pll), nf, &pll->masks);

	return 0;
}

int sr_pll_clk_enable(struct clk *c)
{
	u32 mode;
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(c);

	spin_lock_irqsave(&pll_reg_lock, flags);
	mode = readl_relaxed(PLL_MODE_REG(pll));
	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Wait until PLL is locked. */
	mb();
	udelay(60);

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Ensure that the write above goes through before returning. */
	mb();

	spin_unlock_irqrestore(&pll_reg_lock, flags);

	return 0;
}

int sr_hpm_lp_pll_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(c);
	u32 count, mode;
	int ret = 0;

	spin_lock_irqsave(&pll_reg_lock, flags);

	/* Disable PLL bypass mode and de-assert reset. */
	mode = PLL_BYPASSNL | PLL_RESET_N;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Wait for pll to lock. */
	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
		if (readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)
			break;
		udelay(1);
	}

	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)) {
		WARN(1, "PLL %s didn't lock after enabling it!\n", c->dbg_name);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Ensure the write above goes through before returning. */
	mb();

out:
	spin_unlock_irqrestore(&pll_reg_lock, flags);
	return ret;
}

struct clk_ops clk_ops_local_pll = {
	.enable = local_pll_clk_enable,
	.disable = local_pll_clk_disable,
	.set_rate = local_pll_clk_set_rate,
	.handoff = local_pll_clk_handoff,
};

struct clk_ops clk_ops_sr2_pll = {
	.enable = sr2_pll_clk_enable,
	.disable = local_pll_clk_disable,
	.set_rate = local_pll_clk_set_rate,
	.handoff = local_pll_clk_handoff,
};

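/*
 * Lookup table mapping a PLL's programmed L value to its nominal output
 * rate; used at handoff to deduce the rate the bootloader configured.
 */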
struct pll_rate {
	unsigned int lvalue;
	unsigned long rate;
};

static struct pll_rate pll_l_rate[] = {
	{10, 196000000},
	{12, 245760000},
	{30, 589820000},
	{38, 737280000},
	{41, 800000000},
	{50, 960000000},
	{52, 1008000000},
	{57, 1104000000},
	{60, 1152000000},
	{62, 1200000000},
	{63, 1209600000},
	{73, 1401600000},
	{0, 0},
};

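/* Offset applied to local PLL ids when indexing the shared pll[] array. */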
#define PLL_BASE 7

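/*
 * PLL bookkeeping shared with other processors through SMEM and
 * protected by the "S:7" remote spinlock.
 */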
struct shared_pll_control {
	uint32_t version;
	struct {
		/*
		 * Denotes if the PLL is ON. Technically, this can be read
		 * directly from the PLL registers, but this field is here,
		 * so let's use it.
		 */
		uint32_t on;
		/*
		 * One bit for each processor core. The application processor
		 * is allocated bit position 1. All other bits should be
		 * considered as votes from other processors.
		 */
		uint32_t votes;
	} pll[PLL_BASE + PLL_END];
};

static remote_spinlock_t pll_lock;
static struct shared_pll_control *pll_control;

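/*
 * Locate and sanity-check the shared PLL control structure in SMEM
 * before any shared PLL can be enabled or disabled.
 */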
void __init msm_shared_pll_control_init(void)
{
#define PLL_REMOTE_SPINLOCK_ID "S:7"
	unsigned smem_size;

	remote_spin_lock_init(&pll_lock, PLL_REMOTE_SPINLOCK_ID);

	pll_control = smem_get_entry(SMEM_CLKREGIM_SOURCES, &smem_size);
	if (!pll_control) {
		pr_err("Can't find shared PLL control data structure!\n");
		BUG();
	/*
	 * There might be more PLLs than what the application processor knows
	 * about. But the index used for each PLL is guaranteed to remain the
	 * same.
	 */
	} else if (smem_size < sizeof(struct shared_pll_control)) {
		pr_err("Shared PLL control data structure too small!\n");
		BUG();
	} else if (pll_control->version != 0xCCEE0001) {
		pr_err("Shared PLL control version mismatch!\n");
		BUG();
	} else {
		pr_info("Shared PLL control available.\n");
		return;
	}
}

static int pll_clk_enable(struct clk *c)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(c);
	unsigned int pll_id = pll->id;

	remote_spin_lock(&pll_lock);

	pll_control->pll[PLL_BASE + pll_id].votes |= BIT(1);
	if (!pll_control->pll[PLL_BASE + pll_id].on) {
		__pll_clk_enable_reg(PLL_MODE_REG(pll));
		pll_control->pll[PLL_BASE + pll_id].on = 1;
	}

	remote_spin_unlock(&pll_lock);
	return 0;
}

static void pll_clk_disable(struct clk *c)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(c);
	unsigned int pll_id = pll->id;

	remote_spin_lock(&pll_lock);

	pll_control->pll[PLL_BASE + pll_id].votes &= ~BIT(1);
	if (pll_control->pll[PLL_BASE + pll_id].on
	    && !pll_control->pll[PLL_BASE + pll_id].votes) {
		__pll_clk_disable_reg(PLL_MODE_REG(pll));
		pll_control->pll[PLL_BASE + pll_id].on = 0;
	}

	remote_spin_unlock(&pll_lock);
}

static int pll_clk_is_enabled(struct clk *c)
{
	return readl_relaxed(PLL_MODE_REG(to_pll_shared_clk(c))) & BIT(0);
}

static enum handoff pll_clk_handoff(struct clk *c)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(c);
	unsigned int pll_lval;
	struct pll_rate *l;

	/*
	 * Wait for the PLLs to be initialized and then read their frequency.
	 */
	do {
		pll_lval = readl_relaxed(PLL_MODE_REG(pll) + 4) & 0x3ff;
		cpu_relax();
		udelay(50);
	} while (pll_lval == 0);

	/* Convert PLL L values to PLL output rate */
	for (l = pll_l_rate; l->rate != 0; l++) {
		if (l->lvalue == pll_lval) {
			c->rate = l->rate;
			break;
		}
	}

	if (!c->rate) {
		pr_crit("Unknown PLL's L value!\n");
		BUG();
	}

	if (!pll_clk_is_enabled(c))
		return HANDOFF_DISABLED_CLK;

	/*
	 * Do not call pll_clk_enable() since that function can assume
	 * the PLL is not in use when it's called.
	 */
	remote_spin_lock(&pll_lock);
	pll_control->pll[PLL_BASE + pll->id].votes |= BIT(1);
	pll_control->pll[PLL_BASE + pll->id].on = 1;
	remote_spin_unlock(&pll_lock);

	return HANDOFF_ENABLED_CLK;
}

struct clk_ops clk_ops_pll = {
	.enable = pll_clk_enable,
	.disable = pll_clk_disable,
	.handoff = pll_clk_handoff,
	.is_enabled = pll_clk_is_enabled,
};

static DEFINE_SPINLOCK(soft_vote_lock);

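/*
 * ACPU-voted PLLs add a second, software level of voting: several apps
 * processor clients share one hardware vote bit, so the hardware vote
 * is asserted on the first soft vote and dropped with the last one.
 */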
static int pll_acpu_vote_clk_enable(struct clk *c)
{
	int ret = 0;
	unsigned long flags;
	struct pll_vote_clk *pllv = to_pll_vote_clk(c);

	spin_lock_irqsave(&soft_vote_lock, flags);

	if (!*pllv->soft_vote)
		ret = pll_vote_clk_enable(c);
	if (ret == 0)
		*pllv->soft_vote |= (pllv->soft_vote_mask);

	spin_unlock_irqrestore(&soft_vote_lock, flags);
	return ret;
}

static void pll_acpu_vote_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct pll_vote_clk *pllv = to_pll_vote_clk(c);

	spin_lock_irqsave(&soft_vote_lock, flags);

	*pllv->soft_vote &= ~(pllv->soft_vote_mask);
	if (!*pllv->soft_vote)
		pll_vote_clk_disable(c);

	spin_unlock_irqrestore(&soft_vote_lock, flags);
}

static enum handoff pll_acpu_vote_clk_handoff(struct clk *c)
{
	if (pll_vote_clk_handoff(c) == HANDOFF_DISABLED_CLK)
		return HANDOFF_DISABLED_CLK;

	if (pll_acpu_vote_clk_enable(c))
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

struct clk_ops clk_ops_pll_acpu_vote = {
	.enable = pll_acpu_vote_clk_enable,
	.disable = pll_acpu_vote_clk_disable,
	.is_enabled = pll_vote_clk_is_enabled,
	.handoff = pll_acpu_vote_clk_handoff,
};

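/*
 * FSM voting mode: release the FSM from reset, program the bias count
 * (bits 19:14) and lock count (bits 13:8), then enable FSM voting
 * (bit 20) so the hardware sequences the PLL for the voting masters.
 */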
static void __init __set_fsm_mode(void __iomem *mode_reg,
					u32 bias_count, u32 lock_count)
{
	u32 regval = readl_relaxed(mode_reg);

	/* De-assert reset to FSM */
	regval &= ~BIT(21);
	writel_relaxed(regval, mode_reg);

	/* Program bias count */
	regval &= ~BM(19, 14);
	regval |= BVAL(19, 14, bias_count);
	writel_relaxed(regval, mode_reg);

	/* Program lock count */
	regval &= ~BM(13, 8);
	regval |= BVAL(13, 8, lock_count);
	writel_relaxed(regval, mode_reg);

	/* Enable PLL FSM voting */
	regval |= BIT(20);
	writel_relaxed(regval, mode_reg);
}

void __init __configure_pll(struct pll_config *config,
		struct pll_config_regs *regs, u32 ena_fsm_mode)
{
	u32 regval;

	writel_relaxed(config->l, PLL_L_REG(regs));
	writel_relaxed(config->m, PLL_M_REG(regs));
	writel_relaxed(config->n, PLL_N_REG(regs));

	regval = readl_relaxed(PLL_CONFIG_REG(regs));

	/* Enable the MN accumulator */
	if (config->mn_ena_mask) {
		regval &= ~config->mn_ena_mask;
		regval |= config->mn_ena_val;
	}

	/* Enable the main output */
	if (config->main_output_mask) {
		regval &= ~config->main_output_mask;
		regval |= config->main_output_val;
	}

	/* Set pre-divider and post-divider values */
	regval &= ~config->pre_div_mask;
	regval |= config->pre_div_val;
	regval &= ~config->post_div_mask;
	regval |= config->post_div_val;

	/* Select VCO setting */
	regval &= ~config->vco_mask;
	regval |= config->vco_val;
	writel_relaxed(regval, PLL_CONFIG_REG(regs));
}

void __init configure_sr_pll(struct pll_config *config,
		struct pll_config_regs *regs, u32 ena_fsm_mode)
{
	__configure_pll(config, regs, ena_fsm_mode);
	if (ena_fsm_mode)
		__set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x8);
}

void __init configure_sr_hpm_lp_pll(struct pll_config *config,
		struct pll_config_regs *regs, u32 ena_fsm_mode)
{
	__configure_pll(config, regs, ena_fsm_mode);
	if (ena_fsm_mode)
		__set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x0);
}
679