/*
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/remote_spinlock.h>

#include <mach/scm-io.h>
#include <mach/msm_iomap.h>
#include <mach/msm_smem.h>

#include "clock.h"
#include "clock-pll.h"

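/*
 * On targets with secured clock controller registers, route register
 * accesses through the secure channel (SCM) rather than plain MMIO.
 */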
#ifdef CONFIG_MSM_SECURE_IO
#undef readl_relaxed
#undef writel_relaxed
#define readl_relaxed secure_readl
#define writel_relaxed secure_writel
#endif

#define PLL_OUTCTRL BIT(0)
#define PLL_BYPASSNL BIT(1)
#define PLL_RESET_N BIT(2)
#define PLL_MODE_MASK BM(3, 0)

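/*
 * Each PLL_*_REG() macro resolves a register address: when ->base is set,
 * the corresponding *_reg field is treated as an offset from the already
 * ioremapped base; otherwise the *_reg field itself holds the mapped
 * address.
 */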
#define PLL_EN_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->en_reg)) : \
				((x)->en_reg))
#define PLL_STATUS_REG(x)	((x)->base ? (*(x)->base + (u32)((x)->status_reg)) : \
				((x)->status_reg))
#define PLL_MODE_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->mode_reg)) : \
				((x)->mode_reg))
#define PLL_L_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->l_reg)) : \
				((x)->l_reg))
#define PLL_M_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->m_reg)) : \
				((x)->m_reg))
#define PLL_N_REG(x)		((x)->base ? (*(x)->base + (u32)((x)->n_reg)) : \
				((x)->n_reg))
#define PLL_CONFIG_REG(x)	((x)->base ? (*(x)->base + (u32)((x)->config_reg)) : \
				((x)->config_reg))

static DEFINE_SPINLOCK(pll_reg_lock);

#define ENABLE_WAIT_MAX_LOOPS 200
#define PLL_LOCKED_BIT BIT(16)

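/*
 * Voteable PLLs are shared with other masters: enabling one sets this
 * processor's vote bit in the enable register and then polls the status
 * register until the PLL is actually running.
 */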
static int pll_vote_clk_enable(struct clk *c)
{
	u32 ena, count;
	unsigned long flags;
	struct pll_vote_clk *pllv = to_pll_vote_clk(c);

	spin_lock_irqsave(&pll_reg_lock, flags);
	ena = readl_relaxed(PLL_EN_REG(pllv));
	ena |= pllv->en_mask;
	writel_relaxed(ena, PLL_EN_REG(pllv));
	spin_unlock_irqrestore(&pll_reg_lock, flags);

	/*
	 * Use a memory barrier since some PLL status registers are
	 * not within the same 1K segment as the voting registers.
	 */
	mb();

	/* Wait for pll to enable. */
	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
		if (readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask)
			return 0;
		udelay(1);
	}

	WARN(1, "PLL %s didn't enable after voting for it!\n", c->dbg_name);

	return -ETIMEDOUT;
}

static void pll_vote_clk_disable(struct clk *c)
{
	u32 ena;
	unsigned long flags;
	struct pll_vote_clk *pllv = to_pll_vote_clk(c);

	spin_lock_irqsave(&pll_reg_lock, flags);
	ena = readl_relaxed(PLL_EN_REG(pllv));
	ena &= ~(pllv->en_mask);
	writel_relaxed(ena, PLL_EN_REG(pllv));
	spin_unlock_irqrestore(&pll_reg_lock, flags);
}

static int pll_vote_clk_is_enabled(struct clk *c)
{
	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
	return !!(readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask);
}

static enum handoff pll_vote_clk_handoff(struct clk *c)
{
	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
	if (readl_relaxed(PLL_EN_REG(pllv)) & pllv->en_mask)
		return HANDOFF_ENABLED_CLK;

	return HANDOFF_DISABLED_CLK;
}

struct clk_ops clk_ops_pll_vote = {
	.enable = pll_vote_clk_enable,
	.disable = pll_vote_clk_disable,
	.is_enabled = pll_vote_clk_is_enabled,
	.handoff = pll_vote_clk_handoff,
};

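/*
 * Illustrative sketch only: a board clock file might describe a voteable
 * PLL roughly as below. The register offsets are made up, and the embedded
 * "c" member with its "dbg_name"/"ops" fields is an assumption based on
 * how this file dereferences struct pll_vote_clk; see clock-pll.h for the
 * authoritative layout.
 *
 *	static struct pll_vote_clk example_pll = {
 *		.en_reg = (void __iomem *)0x34C0,
 *		.en_mask = BIT(8),
 *		.status_reg = (void __iomem *)0x3158,
 *		.status_mask = BIT(16),
 *		.c = {
 *			.dbg_name = "example_pll",
 *			.ops = &clk_ops_pll_vote,
 *		},
 *	};
 */
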
static void __pll_config_reg(void __iomem *pll_config, struct pll_freq_tbl *f,
			struct pll_config_masks *masks)
{
	u32 regval;

	regval = readl_relaxed(pll_config);

	/* Enable the MN counter if used */
	if (f->m_val)
		regval |= masks->mn_en_mask;

	/* Set pre-divider and post-divider values */
	regval &= ~masks->pre_div_mask;
	regval |= f->pre_div_val;
	regval &= ~masks->post_div_mask;
	regval |= f->post_div_val;

	/* Select VCO setting */
	regval &= ~masks->vco_mask;
	regval |= f->vco_val;

	/* Enable main output if it has not been enabled */
	if (masks->main_output_mask && !(regval & masks->main_output_mask))
		regval |= masks->main_output_mask;

	writel_relaxed(regval, pll_config);
}

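/*
 * SR2 PLLs use an explicit three-step enable sequence: take the PLL out
 * of bypass, release reset, then turn the output on only after polling
 * the lock bit in the status register.
 */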
static int sr2_pll_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(c);
	int ret = 0, count;
	u32 mode = readl_relaxed(PLL_MODE_REG(pll));

	spin_lock_irqsave(&pll_reg_lock, flags);

	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Wait for pll to lock. */
	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
		if (readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)
			break;
		udelay(1);
	}

	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT))
		pr_err("PLL %s didn't lock after enabling it!\n", c->dbg_name);

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Ensure that the write above goes through before returning. */
	mb();

	spin_unlock_irqrestore(&pll_reg_lock, flags);
	return ret;
}

static void __pll_clk_enable_reg(void __iomem *mode_reg)
{
	u32 mode = readl_relaxed(mode_reg);
	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, mode_reg);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, mode_reg);

	/* Wait until PLL is locked. */
	mb();
	udelay(50);

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, mode_reg);

	/* Ensure that the write above goes through before returning. */
	mb();
}

static int local_pll_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(c);

	spin_lock_irqsave(&pll_reg_lock, flags);
	__pll_clk_enable_reg(PLL_MODE_REG(pll));
	spin_unlock_irqrestore(&pll_reg_lock, flags);

	return 0;
}

static void __pll_clk_disable_reg(void __iomem *mode_reg)
{
	u32 mode = readl_relaxed(mode_reg);
	mode &= ~PLL_MODE_MASK;
	writel_relaxed(mode, mode_reg);
}

static void local_pll_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(c);

	/*
	 * Disable the PLL output, disable test mode, enable
	 * the bypass mode, and assert the reset.
	 */
	spin_lock_irqsave(&pll_reg_lock, flags);
	__pll_clk_disable_reg(PLL_MODE_REG(pll));
	spin_unlock_irqrestore(&pll_reg_lock, flags);
}

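/*
 * Handoff reconstructs the rate a bootloader left behind:
 * rate = parent_rate * L, plus parent_rate * M / N when the MN counter
 * is in use. Illustrative arithmetic only (values assumed, not read from
 * hardware): a 19.2 MHz parent with L = 50, M = 1, N = 2 gives
 * 19.2 MHz * 50 + 19.2 MHz * 1 / 2 = 960 MHz + 9.6 MHz = 969.6 MHz.
 */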
static enum handoff local_pll_clk_handoff(struct clk *c)
{
	struct pll_clk *pll = to_pll_clk(c);
	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
	u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
	unsigned long parent_rate;
	u32 lval, mval, nval, userval;

	if ((mode & mask) != mask)
		return HANDOFF_DISABLED_CLK;

	/* Assume bootloaders configure PLL to c->rate */
	if (c->rate)
		return HANDOFF_ENABLED_CLK;

	parent_rate = clk_get_rate(c->parent);
	lval = readl_relaxed(PLL_L_REG(pll));
	mval = readl_relaxed(PLL_M_REG(pll));
	nval = readl_relaxed(PLL_N_REG(pll));
	userval = readl_relaxed(PLL_CONFIG_REG(pll));

	c->rate = parent_rate * lval;

	if (pll->masks.mn_en_mask && userval) {
		if (!nval)
			nval = 1;
		c->rate += (parent_rate * mval) / nval;
	}

	return HANDOFF_ENABLED_CLK;
}

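/*
 * Rates are programmed from a board-supplied frequency table: the loop
 * below walks pll->freq_tbl until it finds the requested rate or hits
 * the PLL_FREQ_END sentinel.
 */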
static int local_pll_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct pll_freq_tbl *nf;
	struct pll_clk *pll = to_pll_clk(c);
	u32 mode;

	mode = readl_relaxed(PLL_MODE_REG(pll));

	/* Don't change PLL's rate if it is enabled */
	if ((mode & PLL_MODE_MASK) == PLL_MODE_MASK)
		return -EBUSY;

	for (nf = pll->freq_tbl; nf->freq_hz != PLL_FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == PLL_FREQ_END)
		return -EINVAL;

	writel_relaxed(nf->l_val, PLL_L_REG(pll));
	writel_relaxed(nf->m_val, PLL_M_REG(pll));
	writel_relaxed(nf->n_val, PLL_N_REG(pll));

	__pll_config_reg(PLL_CONFIG_REG(pll), nf, &pll->masks);

	return 0;
}

int sr_pll_clk_enable(struct clk *c)
{
	u32 mode;
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(c);

	spin_lock_irqsave(&pll_reg_lock, flags);
	mode = readl_relaxed(PLL_MODE_REG(pll));
	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Wait until PLL is locked. */
	mb();
	udelay(60);

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Ensure that the write above goes through before returning. */
	mb();

	spin_unlock_irqrestore(&pll_reg_lock, flags);

	return 0;
}

int sr_hpm_lp_pll_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(c);
	u32 count, mode;
	int ret = 0;

	spin_lock_irqsave(&pll_reg_lock, flags);

	/* Disable PLL bypass mode and de-assert reset. */
	mode = PLL_BYPASSNL | PLL_RESET_N;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Wait for pll to lock. */
	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
		if (readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)
			break;
		udelay(1);
	}

	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)) {
		WARN(1, "PLL %s didn't lock after enabling it!\n", c->dbg_name);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Ensure the write above goes through before returning. */
	mb();

out:
	spin_unlock_irqrestore(&pll_reg_lock, flags);
	return ret;
}

struct clk_ops clk_ops_local_pll = {
	.enable = local_pll_clk_enable,
	.disable = local_pll_clk_disable,
	.set_rate = local_pll_clk_set_rate,
	.handoff = local_pll_clk_handoff,
};

struct clk_ops clk_ops_sr2_pll = {
	.enable = sr2_pll_clk_enable,
	.disable = local_pll_clk_disable,
	.set_rate = local_pll_clk_set_rate,
	.handoff = local_pll_clk_handoff,
};

struct pll_rate {
	unsigned int lvalue;
	unsigned long rate;
};

static struct pll_rate pll_l_rate[] = {
	{10, 196000000},
	{12, 245760000},
	{30, 589820000},
	{38, 737280000},
	{41, 800000000},
	{50, 960000000},
	{52, 1008000000},
	{57, 1104000000},
	{60, 1152000000},
	{62, 1200000000},
	{63, 1209600000},
	{73, 1401600000},
	{0, 0},
};

#define PLL_BASE 7

struct shared_pll_control {
	uint32_t	version;
	struct {
		/*
		 * Denotes if the PLL is ON. Technically, this can be read
		 * directly from the PLL registers, but this field is here,
		 * so let's use it.
		 */
		uint32_t	on;
		/*
		 * One bit for each processor core. The application processor
		 * is allocated bit position 1. All other bits should be
		 * considered as votes from other processors.
		 */
		uint32_t	votes;
	} pll[PLL_BASE + PLL_END];
};

static remote_spinlock_t pll_lock;
static struct shared_pll_control *pll_control;

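/*
 * The shared PLL control structure lives in SMEM and is used by every
 * processor in the SoC that shares these PLLs; the remote spinlock
 * ("S:7") serializes vote updates across processors.
 */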
void __init msm_shared_pll_control_init(void)
{
#define PLL_REMOTE_SPINLOCK_ID "S:7"
	unsigned smem_size;

	remote_spin_lock_init(&pll_lock, PLL_REMOTE_SPINLOCK_ID);

	pll_control = smem_get_entry(SMEM_CLKREGIM_SOURCES, &smem_size);
	if (!pll_control) {
		pr_err("Can't find shared PLL control data structure!\n");
		BUG();
	/*
	 * There might be more PLLs than what the application processor knows
	 * about. But the index used for each PLL is guaranteed to remain the
	 * same.
	 */
	} else if (smem_size < sizeof(struct shared_pll_control)) {
		pr_err("Shared PLL control data structure too small!\n");
		BUG();
	} else if (pll_control->version != 0xCCEE0001) {
		pr_err("Shared PLL control version mismatch!\n");
		BUG();
	} else {
		pr_info("Shared PLL control available.\n");
		return;
	}
}

static int pll_clk_enable(struct clk *c)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(c);
	unsigned int pll_id = pll->id;

	remote_spin_lock(&pll_lock);

	pll_control->pll[PLL_BASE + pll_id].votes |= BIT(1);
	if (!pll_control->pll[PLL_BASE + pll_id].on) {
		__pll_clk_enable_reg(PLL_MODE_REG(pll));
		pll_control->pll[PLL_BASE + pll_id].on = 1;
	}

	remote_spin_unlock(&pll_lock);
	return 0;
}

static void pll_clk_disable(struct clk *c)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(c);
	unsigned int pll_id = pll->id;

	remote_spin_lock(&pll_lock);

	pll_control->pll[PLL_BASE + pll_id].votes &= ~BIT(1);
	if (pll_control->pll[PLL_BASE + pll_id].on
			&& !pll_control->pll[PLL_BASE + pll_id].votes) {
		__pll_clk_disable_reg(PLL_MODE_REG(pll));
		pll_control->pll[PLL_BASE + pll_id].on = 0;
	}

	remote_spin_unlock(&pll_lock);
}

static int pll_clk_is_enabled(struct clk *c)
{
	return readl_relaxed(PLL_MODE_REG(to_pll_shared_clk(c))) & BIT(0);
}

static enum handoff pll_clk_handoff(struct clk *c)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(c);
	unsigned int pll_lval;
	struct pll_rate *l;

	/*
	 * Wait for the PLLs to be initialized and then read their frequency.
	 */
	do {
		pll_lval = readl_relaxed(PLL_MODE_REG(pll) + 4) & 0x3ff;
		cpu_relax();
		udelay(50);
	} while (pll_lval == 0);

	/* Convert PLL L values to PLL Output rate */
	for (l = pll_l_rate; l->rate != 0; l++) {
		if (l->lvalue == pll_lval) {
			c->rate = l->rate;
			break;
		}
	}

	if (!c->rate) {
		pr_crit("Unknown PLL's L value!\n");
		BUG();
	}

	if (!pll_clk_is_enabled(c))
		return HANDOFF_DISABLED_CLK;

	/*
	 * Do not call pll_clk_enable() since that function can assume
	 * the PLL is not in use when it's called.
	 */
	remote_spin_lock(&pll_lock);
	pll_control->pll[PLL_BASE + pll->id].votes |= BIT(1);
	pll_control->pll[PLL_BASE + pll->id].on = 1;
	remote_spin_unlock(&pll_lock);

	return HANDOFF_ENABLED_CLK;
}

struct clk_ops clk_ops_pll = {
	.enable = pll_clk_enable,
	.disable = pll_clk_disable,
	.handoff = pll_clk_handoff,
	.is_enabled = pll_clk_is_enabled,
};

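/*
 * ACPU "soft" voting layers a processor-local refcount (*soft_vote) on
 * top of the hardware vote, so several application-processor consumers
 * can share a single hardware vote bit.
 */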
static DEFINE_SPINLOCK(soft_vote_lock);

static int pll_acpu_vote_clk_enable(struct clk *c)
{
	int ret = 0;
	unsigned long flags;
	struct pll_vote_clk *pllv = to_pll_vote_clk(c);

	spin_lock_irqsave(&soft_vote_lock, flags);

	if (!*pllv->soft_vote)
		ret = pll_vote_clk_enable(c);
	if (ret == 0)
		*pllv->soft_vote |= (pllv->soft_vote_mask);

	spin_unlock_irqrestore(&soft_vote_lock, flags);
	return ret;
}

static void pll_acpu_vote_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct pll_vote_clk *pllv = to_pll_vote_clk(c);

	spin_lock_irqsave(&soft_vote_lock, flags);

	*pllv->soft_vote &= ~(pllv->soft_vote_mask);
	if (!*pllv->soft_vote)
		pll_vote_clk_disable(c);

	spin_unlock_irqrestore(&soft_vote_lock, flags);
}

static enum handoff pll_acpu_vote_clk_handoff(struct clk *c)
{
	if (pll_vote_clk_handoff(c) == HANDOFF_DISABLED_CLK)
		return HANDOFF_DISABLED_CLK;

	if (pll_acpu_vote_clk_enable(c))
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

struct clk_ops clk_ops_pll_acpu_vote = {
	.enable = pll_acpu_vote_clk_enable,
	.disable = pll_acpu_vote_clk_disable,
	.is_enabled = pll_vote_clk_is_enabled,
	.handoff = pll_acpu_vote_clk_handoff,
};

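/*
 * FSM (hardware-managed) voting mode: de-assert the FSM reset, program
 * the bias and lock counts, then enable FSM voting via BIT(20) so the
 * hardware sequences the PLL on behalf of the voters.
 */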
static void __init __set_fsm_mode(void __iomem *mode_reg,
					u32 bias_count, u32 lock_count)
{
	u32 regval = readl_relaxed(mode_reg);

	/* De-assert reset to FSM */
	regval &= ~BIT(21);
	writel_relaxed(regval, mode_reg);

	/* Program bias count */
	regval &= ~BM(19, 14);
	regval |= BVAL(19, 14, bias_count);
	writel_relaxed(regval, mode_reg);

	/* Program lock count */
	regval &= ~BM(13, 8);
	regval |= BVAL(13, 8, lock_count);
	writel_relaxed(regval, mode_reg);

	/* Enable PLL FSM voting */
	regval |= BIT(20);
	writel_relaxed(regval, mode_reg);
}

void __init __configure_pll(struct pll_config *config,
		struct pll_config_regs *regs, u32 ena_fsm_mode)
{
	u32 regval;

	writel_relaxed(config->l, PLL_L_REG(regs));
	writel_relaxed(config->m, PLL_M_REG(regs));
	writel_relaxed(config->n, PLL_N_REG(regs));

	regval = readl_relaxed(PLL_CONFIG_REG(regs));

	/* Enable the MN accumulator */
	if (config->mn_ena_mask) {
		regval &= ~config->mn_ena_mask;
		regval |= config->mn_ena_val;
	}

	/* Enable the main output */
	if (config->main_output_mask) {
		regval &= ~config->main_output_mask;
		regval |= config->main_output_val;
	}

	/* Set pre-divider and post-divider values */
	regval &= ~config->pre_div_mask;
	regval |= config->pre_div_val;
	regval &= ~config->post_div_mask;
	regval |= config->post_div_val;

	/* Select VCO setting */
	regval &= ~config->vco_mask;
	regval |= config->vco_val;
	writel_relaxed(regval, PLL_CONFIG_REG(regs));
}

void __init configure_sr_pll(struct pll_config *config,
		struct pll_config_regs *regs, u32 ena_fsm_mode)
{
	__configure_pll(config, regs, ena_fsm_mode);
	if (ena_fsm_mode)
		__set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x8);
}

void __init configure_sr_hpm_lp_pll(struct pll_config *config,
		struct pll_config_regs *regs, u32 ena_fsm_mode)
{
	__configure_pll(config, regs, ena_fsm_mode);
	if (ena_fsm_mode)
		__set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x0);
}
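
/*
 * Illustrative sketch only: a target's __init clock code might configure
 * an SR PLL roughly as below. Every value here is hypothetical, and
 * "example_regs" stands in for a real struct pll_config_regs describing
 * the PLL's register addresses; only fields referenced by
 * __configure_pll() above are used.
 *
 *	static struct pll_config example_cfg __initdata = {
 *		.l = 50,
 *		.m = 0,
 *		.n = 1,
 *		.vco_val = 0x0,
 *		.vco_mask = BM(17, 16),
 *		.pre_div_val = 0x0,
 *		.pre_div_mask = BIT(19),
 *		.post_div_val = 0x0,
 *		.post_div_mask = BM(21, 20),
 *		.mn_ena_val = BIT(22),
 *		.mn_ena_mask = BIT(22),
 *		.main_output_val = BIT(23),
 *		.main_output_mask = BIT(23),
 *	};
 *
 *	configure_sr_pll(&example_cfg, &example_regs, 1);
 */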