blob: d8399110cc143fd6abc48e8aabea360748ad58e8 [file] [log] [blame]
Pankaj Kumar3912c982011-12-07 16:59:03 +05301/*
2 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#include <linux/kernel.h>
16#include <linux/delay.h>
17#include <linux/err.h>
18#include <linux/remote_spinlock.h>
19
Vikram Mulukutla681d8682012-03-09 23:56:20 -080020#include <mach/scm-io.h>
Pankaj Kumar3912c982011-12-07 16:59:03 +053021#include <mach/msm_iomap.h>
22
23#include "clock.h"
24#include "clock-pll.h"
25#include "smd_private.h"
26
Vikram Mulukutla681d8682012-03-09 23:56:20 -080027#ifdef CONFIG_MSM_SECURE_IO
28#undef readl_relaxed
29#undef writel_relaxed
30#define readl_relaxed secure_readl
31#define writel_relaxed secure_writel
32#endif
33
/* PLL MODE register control bits. */
#define PLL_OUTCTRL BIT(0)	/* Set to ungate the PLL output. */
#define PLL_BYPASSNL BIT(1)	/* Active-low; set to disable bypass mode. */
#define PLL_RESET_N BIT(2)	/* Active-low; set to de-assert PLL reset. */
#define PLL_MODE_MASK BM(3, 0)	/* All mode control bits (incl. test mode). */

/*
 * Register address helpers: when a PLL descriptor carries a mapped base
 * pointer, the *_reg fields hold offsets from that base; otherwise they
 * already hold the register address itself.
 */
#define PLL_EN_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->en_reg)) : \
			((x)->en_reg))
#define PLL_STATUS_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->status_reg)) : \
			((x)->status_reg))
#define PLL_MODE_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->mode_reg)) : \
			((x)->mode_reg))
#define PLL_L_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->l_reg)) : \
			((x)->l_reg))
#define PLL_M_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->m_reg)) : \
			((x)->m_reg))
#define PLL_N_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->n_reg)) : \
			((x)->n_reg))
#define PLL_CONFIG_REG(x) ((x)->base ? (*(x)->base + (u32)((x)->config_reg)) : \
			((x)->config_reg))
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -070053
/* Serializes all local read-modify-write access to PLL registers. */
static DEFINE_SPINLOCK(pll_reg_lock);

/* Maximum number of 1us polls while waiting for a PLL to enable/lock. */
#define ENABLE_WAIT_MAX_LOOPS 200
57
Vikram Mulukutla681d8682012-03-09 23:56:20 -080058int pll_vote_clk_enable(struct clk *clk)
59{
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070060 u32 ena, count;
Vikram Mulukutla681d8682012-03-09 23:56:20 -080061 unsigned long flags;
62 struct pll_vote_clk *pll = to_pll_vote_clk(clk);
63
64 spin_lock_irqsave(&pll_reg_lock, flags);
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -070065 ena = readl_relaxed(PLL_EN_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -080066 ena |= pll->en_mask;
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -070067 writel_relaxed(ena, PLL_EN_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -080068 spin_unlock_irqrestore(&pll_reg_lock, flags);
69
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070070 /*
71 * Use a memory barrier since some PLL status registers are
72 * not within the same 1K segment as the voting registers.
73 */
74 mb();
Vikram Mulukutla681d8682012-03-09 23:56:20 -080075
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070076 /* Wait for pll to enable. */
77 for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
Vikram Mulukutlaaa3e0112012-04-23 14:40:51 -070078 if (readl_relaxed(PLL_STATUS_REG(pll)) & pll->status_mask)
Vikram Mulukutla7b953b12012-04-09 13:56:26 -070079 return 0;
80 udelay(1);
81 }
82
83 WARN("PLL %s didn't enable after voting for it!\n", clk->dbg_name);
84
85 return -ETIMEDOUT;
Vikram Mulukutla681d8682012-03-09 23:56:20 -080086}
87
88void pll_vote_clk_disable(struct clk *clk)
89{
90 u32 ena;
91 unsigned long flags;
92 struct pll_vote_clk *pll = to_pll_vote_clk(clk);
93
94 spin_lock_irqsave(&pll_reg_lock, flags);
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -070095 ena = readl_relaxed(PLL_EN_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -080096 ena &= ~(pll->en_mask);
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -070097 writel_relaxed(ena, PLL_EN_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -080098 spin_unlock_irqrestore(&pll_reg_lock, flags);
99}
100
101struct clk *pll_vote_clk_get_parent(struct clk *clk)
102{
103 struct pll_vote_clk *pll = to_pll_vote_clk(clk);
104 return pll->parent;
105}
106
107int pll_vote_clk_is_enabled(struct clk *clk)
108{
109 struct pll_vote_clk *pll = to_pll_vote_clk(clk);
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700110 return !!(readl_relaxed(PLL_STATUS_REG(pll)) & pll->status_mask);
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800111}
112
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700113static enum handoff pll_vote_clk_handoff(struct clk *clk)
114{
115 struct pll_vote_clk *pll = to_pll_vote_clk(clk);
116 if (readl_relaxed(PLL_EN_REG(pll)) & pll->en_mask)
117 return HANDOFF_ENABLED_CLK;
118
119 return HANDOFF_DISABLED_CLK;
120}
121
/* Clock operations for PLLs enabled via a shared voting register. */
struct clk_ops clk_ops_pll_vote = {
	.enable = pll_vote_clk_enable,
	.disable = pll_vote_clk_disable,
	.auto_off = pll_vote_clk_disable,
	.is_enabled = pll_vote_clk_is_enabled,
	.get_parent = pll_vote_clk_get_parent,
	.handoff = pll_vote_clk_handoff,
};
130
/*
 * Raw PLL power-up sequence on a MODE register: disable bypass,
 * release reset, wait for lock, then ungate the output.  The delays
 * between steps are hardware requirements.  Callers serialize access
 * with a lock (pll_reg_lock locally, or the remote pll_lock).
 */
static void __pll_clk_enable_reg(void __iomem *mode_reg)
{
	u32 mode = readl_relaxed(mode_reg);
	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, mode_reg);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, mode_reg);

	/* Wait until PLL is locked. */
	mb();
	udelay(50);

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, mode_reg);

	/* Ensure that the write above goes through before returning. */
	mb();
}
160
161static int local_pll_clk_enable(struct clk *clk)
162{
163 unsigned long flags;
164 struct pll_clk *pll = to_pll_clk(clk);
165
166 spin_lock_irqsave(&pll_reg_lock, flags);
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700167 __pll_clk_enable_reg(PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800168 spin_unlock_irqrestore(&pll_reg_lock, flags);
169
170 return 0;
171}
172
173static void __pll_clk_disable_reg(void __iomem *mode_reg)
174{
175 u32 mode = readl_relaxed(mode_reg);
176 mode &= ~PLL_MODE_MASK;
177 writel_relaxed(mode, mode_reg);
178}
179
180static void local_pll_clk_disable(struct clk *clk)
181{
182 unsigned long flags;
183 struct pll_clk *pll = to_pll_clk(clk);
184
185 /*
186 * Disable the PLL output, disable test mode, enable
187 * the bypass mode, and assert the reset.
188 */
189 spin_lock_irqsave(&pll_reg_lock, flags);
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700190 __pll_clk_disable_reg(PLL_MODE_REG(pll));
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800191 spin_unlock_irqrestore(&pll_reg_lock, flags);
192}
193
Matt Wagantall4a36a7e2012-05-14 17:03:21 -0700194static enum handoff local_pll_clk_handoff(struct clk *clk)
195{
196 struct pll_clk *pll = to_pll_clk(clk);
197 u32 mode = readl_relaxed(PLL_MODE_REG(pll));
198 u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
199
200 if ((mode & mask) == mask)
201 return HANDOFF_ENABLED_CLK;
202
203 return HANDOFF_DISABLED_CLK;
204}
205
Vikram Mulukutla681d8682012-03-09 23:56:20 -0800206static struct clk *local_pll_clk_get_parent(struct clk *clk)
207{
208 struct pll_clk *pll = to_pll_clk(clk);
209 return pll->parent;
210}
211
/*
 * Enable an SR-family PLL: de-assert reset, disable bypass, wait for
 * lock, then enable the output.  The whole sequence runs under
 * pll_reg_lock.  Always returns 0.
 *
 * NOTE(review): unlike __pll_clk_enable_reg(), reset is de-asserted
 * *before* bypass is disabled here, yet the comment below still
 * describes the opposite order — confirm this is the intended SR
 * bring-up sequence.
 */
int sr_pll_clk_enable(struct clk *clk)
{
	u32 mode;
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(clk);

	spin_lock_irqsave(&pll_reg_lock, flags);
	mode = readl_relaxed(PLL_MODE_REG(pll));
	/* De-assert active-low PLL reset. */
	mode |= PLL_RESET_N;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* Disable PLL bypass mode. */
	mode |= PLL_BYPASSNL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Wait until PLL is locked. */
	mb();
	udelay(60);

	/* Enable PLL output. */
	mode |= PLL_OUTCTRL;
	writel_relaxed(mode, PLL_MODE_REG(pll));

	/* Ensure that the write above goes through before returning. */
	mb();

	spin_unlock_irqrestore(&pll_reg_lock, flags);

	return 0;
}
250
Vikram Mulukutlaaa3e0112012-04-23 14:40:51 -0700251#define PLL_LOCKED_BIT BIT(16)
252
253int copper_pll_clk_enable(struct clk *clk)
254{
255 unsigned long flags;
256 struct pll_clk *pll = to_pll_clk(clk);
257 u32 count, mode;
258 int ret = 0;
259
260 spin_lock_irqsave(&pll_reg_lock, flags);
261 mode = readl_relaxed(PLL_MODE_REG(pll));
262 /* Disable PLL bypass mode. */
Matt Wagantall10dde282012-05-14 19:30:11 -0700263 mode |= PLL_BYPASSNL;
Vikram Mulukutlaaa3e0112012-04-23 14:40:51 -0700264 writel_relaxed(mode, PLL_MODE_REG(pll));
265
266 /*
267 * H/W requires a 5us delay between disabling the bypass and
268 * de-asserting the reset. Delay 10us just to be safe.
269 */
270 mb();
271 udelay(10);
272
273 /* De-assert active-low PLL reset. */
Matt Wagantall10dde282012-05-14 19:30:11 -0700274 mode |= PLL_RESET_N;
Vikram Mulukutlaaa3e0112012-04-23 14:40:51 -0700275 writel_relaxed(mode, PLL_MODE_REG(pll));
276
277 /* Wait for pll to enable. */
278 for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
279 if (readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)
280 break;
281 udelay(1);
282 }
283
284 if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)) {
285 WARN("PLL %s didn't lock after enabling it!\n", clk->dbg_name);
286 ret = -ETIMEDOUT;
287 goto out;
288 }
289
290 /* Enable PLL output. */
Matt Wagantall10dde282012-05-14 19:30:11 -0700291 mode |= PLL_OUTCTRL;
Vikram Mulukutlaaa3e0112012-04-23 14:40:51 -0700292 writel_relaxed(mode, PLL_MODE_REG(pll));
293
294 /* Ensure the write above goes through before returning. */
295 mb();
296
297out:
298 spin_unlock_irqrestore(&pll_reg_lock, flags);
299 return ret;
300}
301
/* Clock operations for PLLs controlled directly by this processor. */
struct clk_ops clk_ops_local_pll = {
	.enable = local_pll_clk_enable,
	.disable = local_pll_clk_disable,
	.auto_off = local_pll_clk_disable,
	.handoff = local_pll_clk_handoff,
	.get_parent = local_pll_clk_get_parent,
};
309
/* Maps a PLL L (multiplier) register value to an output rate. */
struct pll_rate {
	unsigned int lvalue;	/* Value read from the L register. */
	unsigned long rate;	/* Resulting output rate, in Hz. */
};

/* Known L-value/rate pairs; the zero-rate entry terminates the table. */
static struct pll_rate pll_l_rate[] = {
	{10, 196000000},
	{12, 245760000},
	{30, 589820000},
	{38, 737280000},
	{41, 800000000},
	{50, 960000000},
	{52, 1008000000},
	{60, 1152000000},
	{62, 1200000000},
	{63, 1209600000},
	{0, 0},
};
328
/* Index of the first shared PLL within the SMEM control structure. */
#define PLL_BASE 7

/*
 * Layout of the PLL state shared with other processors through SMEM
 * (SMEM_CLKREGIM_SOURCES); validated in msm_shared_pll_control_init().
 */
struct shared_pll_control {
	uint32_t version;	/* Expected to read 0xCCEE0001. */
	struct {
		/*
		 * Denotes if the PLL is ON. Technically, this can be read
		 * directly from the PLL registers, but this field is here,
		 * so let's use it.
		 */
		uint32_t on;
		/*
		 * One bit for each processor core. The application processor
		 * is allocated bit position 1. All other bits should be
		 * considered as votes from other processors.
		 */
		uint32_t votes;
	} pll[PLL_BASE + PLL_END];
};

/* Cross-processor lock protecting pll_control below. */
static remote_spinlock_t pll_lock;
/* Points into SMEM; set up by msm_shared_pll_control_init(). */
static struct shared_pll_control *pll_control;
351
352void __init msm_shared_pll_control_init(void)
353{
354#define PLL_REMOTE_SPINLOCK_ID "S:7"
355 unsigned smem_size;
356
357 remote_spin_lock_init(&pll_lock, PLL_REMOTE_SPINLOCK_ID);
358
359 pll_control = smem_get_entry(SMEM_CLKREGIM_SOURCES, &smem_size);
360 if (!pll_control) {
361 pr_err("Can't find shared PLL control data structure!\n");
362 BUG();
363 /*
364 * There might be more PLLs than what the application processor knows
365 * about. But the index used for each PLL is guaranteed to remain the
366 * same.
367 */
368 } else if (smem_size < sizeof(struct shared_pll_control)) {
369 pr_err("Shared PLL control data"
370 "structure too small!\n");
371 BUG();
372 } else if (pll_control->version != 0xCCEE0001) {
373 pr_err("Shared PLL control version mismatch!\n");
374 BUG();
375 } else {
376 pr_info("Shared PLL control available.\n");
377 return;
378 }
379
380}
381
Pankaj Kumar3912c982011-12-07 16:59:03 +0530382static int pll_clk_enable(struct clk *clk)
383{
384 struct pll_shared_clk *pll = to_pll_shared_clk(clk);
385 unsigned int pll_id = pll->id;
386
387 remote_spin_lock(&pll_lock);
388
389 pll_control->pll[PLL_BASE + pll_id].votes |= BIT(1);
390 if (!pll_control->pll[PLL_BASE + pll_id].on) {
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700391 __pll_clk_enable_reg(PLL_MODE_REG(pll));
Pankaj Kumar3912c982011-12-07 16:59:03 +0530392 pll_control->pll[PLL_BASE + pll_id].on = 1;
393 }
394
395 remote_spin_unlock(&pll_lock);
396 return 0;
397}
398
399static void pll_clk_disable(struct clk *clk)
400{
401 struct pll_shared_clk *pll = to_pll_shared_clk(clk);
402 unsigned int pll_id = pll->id;
403
404 remote_spin_lock(&pll_lock);
405
406 pll_control->pll[PLL_BASE + pll_id].votes &= ~BIT(1);
407 if (pll_control->pll[PLL_BASE + pll_id].on
408 && !pll_control->pll[PLL_BASE + pll_id].votes) {
Vikram Mulukutla4d6caa82012-04-10 18:04:55 -0700409 __pll_clk_disable_reg(PLL_MODE_REG(pll));
Pankaj Kumar3912c982011-12-07 16:59:03 +0530410 pll_control->pll[PLL_BASE + pll_id].on = 0;
411 }
412
413 remote_spin_unlock(&pll_lock);
414}
415
/* Report whether the PLL output is enabled (MODE bit 0, PLL_OUTCTRL). */
static int pll_clk_is_enabled(struct clk *clk)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(clk);

	return readl_relaxed(PLL_MODE_REG(pll)) & BIT(0);
}
422
/*
 * Boot-time handoff for a shared PLL: read its L value, translate it
 * to a rate via pll_l_rate[], and record it in clk->rate.  BUG()s on
 * an L value not present in the table.  Always reports the clock as
 * enabled.
 */
static enum handoff pll_clk_handoff(struct clk *clk)
{
	struct pll_shared_clk *pll = to_pll_shared_clk(clk);
	unsigned int pll_lval;
	struct pll_rate *l;

	/*
	 * Wait for the PLLs to be initialized and then read their frequency.
	 * NOTE(review): this polls forever if the L value never becomes
	 * non-zero — presumably guaranteed by whichever processor programs
	 * the PLL; confirm.
	 */
	do {
		/* The L register sits 4 bytes past the MODE register. */
		pll_lval = readl_relaxed(PLL_MODE_REG(pll) + 4) & 0x3ff;
		cpu_relax();
		udelay(50);
	} while (pll_lval == 0);

	/* Convert PLL L values to PLL Output rate */
	for (l = pll_l_rate; l->rate != 0; l++) {
		if (l->lvalue == pll_lval) {
			clk->rate = l->rate;
			break;
		}
	}

	if (!clk->rate) {
		pr_crit("Unknown PLL's L value!\n");
		BUG();
	}

	return HANDOFF_ENABLED_CLK;
}
453
/* Clock operations for PLLs shared across processors via SMEM voting. */
struct clk_ops clk_ops_pll = {
	.enable = pll_clk_enable,
	.disable = pll_clk_disable,
	.handoff = pll_clk_handoff,
	.is_enabled = pll_clk_is_enabled,
};
Vikram Mulukutla5b146722012-04-23 18:17:50 -0700460
/*
 * Hand a PLL over to hardware FSM (voting) control: release the FSM
 * reset, program the bias and lock counts, then enable FSM voting.
 * Each step is committed with its own register write.
 */
static void __init __set_fsm_mode(void __iomem *mode_reg)
{
	u32 regval = readl_relaxed(mode_reg);

	/* De-assert reset to FSM */
	regval &= ~BIT(21);
	writel_relaxed(regval, mode_reg);

	/* Program bias count */
	regval &= ~BM(19, 14);
	regval |= BVAL(19, 14, 0x1);
	writel_relaxed(regval, mode_reg);

	/* Program lock count */
	regval &= ~BM(13, 8);
	regval |= BVAL(13, 8, 0x8);
	writel_relaxed(regval, mode_reg);

	/* Enable PLL FSM voting */
	regval |= BIT(20);
	writel_relaxed(regval, mode_reg);
}
483
/*
 * One-time boot configuration of a PLL: program the L/M/N values,
 * then update the CONFIG register (MN accumulator, main output,
 * pre/post dividers, VCO selection) with a single read-modify-write.
 * If @ena_fsm_mode is non-zero, the PLL is finally placed under
 * hardware FSM voting control.
 */
void __init configure_pll(struct pll_config *config,
		struct pll_config_regs *regs, u32 ena_fsm_mode)
{
	u32 regval;

	writel_relaxed(config->l, PLL_L_REG(regs));
	writel_relaxed(config->m, PLL_M_REG(regs));
	writel_relaxed(config->n, PLL_N_REG(regs));

	regval = readl_relaxed(PLL_CONFIG_REG(regs));

	/* Enable the MN accumulator */
	if (config->mn_ena_mask) {
		regval &= ~config->mn_ena_mask;
		regval |= config->mn_ena_val;
	}

	/* Enable the main output */
	if (config->main_output_mask) {
		regval &= ~config->main_output_mask;
		regval |= config->main_output_val;
	}

	/* Set pre-divider and post-divider values */
	regval &= ~config->pre_div_mask;
	regval |= config->pre_div_val;
	regval &= ~config->post_div_mask;
	regval |= config->post_div_val;

	/* Select VCO setting */
	regval &= ~config->vco_mask;
	regval |= config->vco_val;
	writel_relaxed(regval, PLL_CONFIG_REG(regs));

	/* Configure in FSM mode if necessary */
	if (ena_fsm_mode)
		__set_fsm_mode(PLL_MODE_REG(regs));
}