/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <asm/div64.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
#include "clk.h"

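/*
 * The pll work-mode values double as the parent index of the clk_mux
 * that rockchip_clk_register_pll() stacks on top of the pll: slow mode
 * selects the xin24m oscillator, normal mode the pll output and deep
 * (slow) mode the xin32k clock.
 */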
#define PLL_MODE_MASK		0x3
#define PLL_MODE_SLOW		0x0
#define PLL_MODE_NORM		0x1
#define PLL_MODE_DEEP		0x2

struct rockchip_clk_pll {
	struct clk_hw		hw;

	struct clk_mux		pll_mux;
	const struct clk_ops	*pll_mux_ops;

	struct notifier_block	clk_nb;

	void __iomem		*reg_base;
	int			lock_offset;
	unsigned int		lock_shift;
	enum rockchip_pll_type	type;
	const struct rockchip_pll_rate_table *rate_table;
	unsigned int		rate_count;
	spinlock_t		*lock;
};

#define to_rockchip_clk_pll(_hw) container_of(_hw, struct rockchip_clk_pll, hw)
#define to_rockchip_clk_pll_nb(nb) \
		container_of(nb, struct rockchip_clk_pll, clk_nb)

static const struct rockchip_pll_rate_table *rockchip_get_pll_settings(
			    struct rockchip_clk_pll *pll, unsigned long rate)
{
	const struct rockchip_pll_rate_table *rate_table = pll->rate_table;
	int i;

	for (i = 0; i < pll->rate_count; i++) {
		if (rate == rate_table[i].rate)
			return &rate_table[i];
	}

	return NULL;
}

static long rockchip_pll_round_rate(struct clk_hw *hw,
			    unsigned long drate, unsigned long *prate)
{
	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
	const struct rockchip_pll_rate_table *rate_table = pll->rate_table;
	int i;

	/* Assuming rate_table is in descending order */
	for (i = 0; i < pll->rate_count; i++) {
		if (drate >= rate_table[i].rate)
			return rate_table[i].rate;
	}

	/* return minimum supported value */
	return rate_table[i - 1].rate;
}

/*
 * Wait for the pll to reach the locked state.
 * The calling set_rate function is responsible for making sure the
 * grf regmap is available.
 * The lock status is polled from the GRF via regmap; the delay counter
 * only bounds the number of polls, it is not a time value.
 */
static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll)
{
	struct regmap *grf = rockchip_clk_get_grf();
	unsigned int val;
	int delay = 24000000, ret;

	while (delay > 0) {
		ret = regmap_read(grf, pll->lock_offset, &val);
		if (ret) {
			pr_err("%s: failed to read pll lock status: %d\n",
			       __func__, ret);
			return ret;
		}

		if (val & BIT(pll->lock_shift))
			return 0;
		delay--;
	}

	pr_err("%s: timeout waiting for pll to lock\n", __func__);
	return -ETIMEDOUT;
}

/*
 * PLL used in RK3066, RK3188 and RK3288
 */

#define RK3066_PLL_RESET_DELAY(nr)	((nr * 500) / 24 + 1)

#define RK3066_PLLCON(i)		(i * 0x4)
#define RK3066_PLLCON0_OD_MASK		0xf
#define RK3066_PLLCON0_OD_SHIFT		0
#define RK3066_PLLCON0_NR_MASK		0x3f
#define RK3066_PLLCON0_NR_SHIFT		8
#define RK3066_PLLCON1_NF_MASK		0x1fff
#define RK3066_PLLCON1_NF_SHIFT		0
#define RK3066_PLLCON2_BWADJ_MASK	0xfff
#define RK3066_PLLCON2_BWADJ_SHIFT	0
#define RK3066_PLLCON3_RESET		(1 << 5)
#define RK3066_PLLCON3_PWRDOWN		(1 << 1)
#define RK3066_PLLCON3_BYPASS		(1 << 0)

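/*
 * The pll output frequency follows from the divider fields decoded in
 * rockchip_rk3066_pll_recalc_rate() below:
 *
 *	FOUT = FREF * (NF + 1) / ((NR + 1) * (NO + 1))
 *
 * As an illustrative example (not taken from a real rate table): a
 * 24 MHz reference with the register fields NR = 0, NF = 65 and NO = 1
 * yields 24 MHz * 66 / (1 * 2) = 792 MHz.  The supported combinations
 * come from the per-SoC rate table passed to rockchip_clk_register_pll().
 */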
static unsigned long rockchip_rk3066_pll_recalc_rate(struct clk_hw *hw,
						     unsigned long prate)
{
	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
	u64 nf, nr, no, rate64 = prate;
	u32 pllcon;

	pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(3));
	if (pllcon & RK3066_PLLCON3_BYPASS) {
		pr_debug("%s: pll %s is bypassed\n", __func__,
			 __clk_get_name(hw->clk));
		return prate;
	}

	pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(1));
	nf = (pllcon >> RK3066_PLLCON1_NF_SHIFT) & RK3066_PLLCON1_NF_MASK;

	pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(0));
	nr = (pllcon >> RK3066_PLLCON0_NR_SHIFT) & RK3066_PLLCON0_NR_MASK;
	no = (pllcon >> RK3066_PLLCON0_OD_SHIFT) & RK3066_PLLCON0_OD_MASK;

	rate64 *= (nf + 1);
	do_div(rate64, nr + 1);
	do_div(rate64, no + 1);

	return (unsigned long)rate64;
}

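/*
 * Rate changes follow a fixed sequence: remux to slow mode if the pll
 * output is currently selected, assert the pll reset, program the
 * NR/NO/NF/BWADJ fields from the rate table, release the reset, wait
 * RK3066_PLL_RESET_DELAY() microseconds, poll the GRF lock bit, then
 * remux back to normal mode.  The HIWORD_UPDATE() writes are assumed
 * to carry a write-enable mask for the touched bits in the upper 16
 * bits of the register (see clk.h), so no read-modify-write is needed.
 */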
static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate,
					unsigned long prate)
{
	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
	const struct rockchip_pll_rate_table *rate;
	unsigned long old_rate = rockchip_rk3066_pll_recalc_rate(hw, prate);
	struct regmap *grf = rockchip_clk_get_grf();
	struct clk_mux *pll_mux = &pll->pll_mux;
	const struct clk_ops *pll_mux_ops = pll->pll_mux_ops;
	int rate_change_remuxed = 0;
	int cur_parent;
	int ret;

	if (IS_ERR(grf)) {
		pr_debug("%s: grf regmap not available, aborting rate change\n",
			 __func__);
		return PTR_ERR(grf);
	}

	pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n",
		 __func__, __clk_get_name(hw->clk), old_rate, drate, prate);

	/* Get required rate settings from table */
	rate = rockchip_get_pll_settings(pll, drate);
	if (!rate) {
		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
		       drate, __clk_get_name(hw->clk));
		return -EINVAL;
	}

	pr_debug("%s: rate settings for %lu (nr, no, nf): (%d, %d, %d)\n",
		 __func__, rate->rate, rate->nr, rate->no, rate->nf);

	cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
	if (cur_parent == PLL_MODE_NORM) {
		pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
		rate_change_remuxed = 1;
	}

	/* enter reset mode */
	writel(HIWORD_UPDATE(RK3066_PLLCON3_RESET, RK3066_PLLCON3_RESET, 0),
	       pll->reg_base + RK3066_PLLCON(3));

	/* update pll values */
	writel(HIWORD_UPDATE(rate->nr - 1, RK3066_PLLCON0_NR_MASK,
					   RK3066_PLLCON0_NR_SHIFT) |
	       HIWORD_UPDATE(rate->no - 1, RK3066_PLLCON0_OD_MASK,
					   RK3066_PLLCON0_OD_SHIFT),
	       pll->reg_base + RK3066_PLLCON(0));

	writel_relaxed(HIWORD_UPDATE(rate->nf - 1, RK3066_PLLCON1_NF_MASK,
						   RK3066_PLLCON1_NF_SHIFT),
		       pll->reg_base + RK3066_PLLCON(1));
	writel_relaxed(HIWORD_UPDATE(rate->bwadj, RK3066_PLLCON2_BWADJ_MASK,
						  RK3066_PLLCON2_BWADJ_SHIFT),
		       pll->reg_base + RK3066_PLLCON(2));

	/* leave reset and wait the reset_delay */
	writel(HIWORD_UPDATE(0, RK3066_PLLCON3_RESET, 0),
	       pll->reg_base + RK3066_PLLCON(3));
	udelay(RK3066_PLL_RESET_DELAY(rate->nr));

	/* wait for the pll to lock */
	ret = rockchip_pll_wait_lock(pll);
	if (ret) {
		pr_warn("%s: pll did not lock, trying to restore old rate %lu\n",
			__func__, old_rate);
		rockchip_rk3066_pll_set_rate(hw, old_rate, prate);
	}

	if (rate_change_remuxed)
		pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_NORM);

	return ret;
}

static int rockchip_rk3066_pll_enable(struct clk_hw *hw)
{
	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);

	writel(HIWORD_UPDATE(0, RK3066_PLLCON3_PWRDOWN, 0),
	       pll->reg_base + RK3066_PLLCON(3));

	return 0;
}

static void rockchip_rk3066_pll_disable(struct clk_hw *hw)
{
	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);

	writel(HIWORD_UPDATE(RK3066_PLLCON3_PWRDOWN,
			     RK3066_PLLCON3_PWRDOWN, 0),
	       pll->reg_base + RK3066_PLLCON(3));
}

static int rockchip_rk3066_pll_is_enabled(struct clk_hw *hw)
{
	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
	u32 pllcon = readl(pll->reg_base + RK3066_PLLCON(3));

	return !(pllcon & RK3066_PLLCON3_PWRDOWN);
}

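/*
 * Used when the pll is registered without a rate table: its rate can be
 * read back and it can be powered up and down, but it cannot be
 * reprogrammed (see the pll_type switch in rockchip_clk_register_pll()).
 */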
static const struct clk_ops rockchip_rk3066_pll_clk_norate_ops = {
	.recalc_rate = rockchip_rk3066_pll_recalc_rate,
	.enable = rockchip_rk3066_pll_enable,
	.disable = rockchip_rk3066_pll_disable,
	.is_enabled = rockchip_rk3066_pll_is_enabled,
};

static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
	.recalc_rate = rockchip_rk3066_pll_recalc_rate,
	.round_rate = rockchip_pll_round_rate,
	.set_rate = rockchip_rk3066_pll_set_rate,
	.enable = rockchip_rk3066_pll_enable,
	.disable = rockchip_rk3066_pll_disable,
	.is_enabled = rockchip_rk3066_pll_is_enabled,
};

/*
 * Common registering of pll clocks
 */

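/*
 * Purely illustrative call; the names and register offsets below are
 * hypothetical, the real per-SoC values live in the respective SoC
 * clock driver:
 *
 *	clk = rockchip_clk_register_pll(pll_rk3066, "apll",
 *					parent_names, 2, reg_base,
 *					0x0, 0x84, 6, 0x40, 0,
 *					apll_rates, &clk_lock);
 *
 * parent_names[] must hold exactly two entries, the 24 MHz oscillator
 * and the 32 kHz clock, which together with the pll output form the
 * three inputs of the mode mux created below.
 */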
struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
		const char *name, const char **parent_names, u8 num_parents,
		void __iomem *base, int con_offset, int grf_lock_offset,
		int lock_shift, int mode_offset, int mode_shift,
		struct rockchip_pll_rate_table *rate_table,
		spinlock_t *lock)
{
	const char *pll_parents[3];
	struct clk_init_data init;
	struct rockchip_clk_pll *pll;
	struct clk_mux *pll_mux;
	struct clk *pll_clk, *mux_clk;
	char pll_name[20];

	if (num_parents != 2) {
		pr_err("%s: needs two parent clocks\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* name the actual pll */
	snprintf(pll_name, sizeof(pll_name), "pll_%s", name);

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	init.name = pll_name;

	/* keep all plls untouched for now */
	init.flags = CLK_IGNORE_UNUSED;

	init.parent_names = &parent_names[0];
	init.num_parents = 1;

	if (rate_table) {
		int len;

		/* find count of rates in rate_table */
		for (len = 0; rate_table[len].rate != 0; )
			len++;

		pll->rate_count = len;
		pll->rate_table = kmemdup(rate_table,
					pll->rate_count *
					sizeof(struct rockchip_pll_rate_table),
					GFP_KERNEL);
		WARN(!pll->rate_table,
			"%s: could not allocate rate table for %s\n",
			__func__, name);
	}

	switch (pll_type) {
	case pll_rk3066:
		if (!pll->rate_table)
			init.ops = &rockchip_rk3066_pll_clk_norate_ops;
		else
			init.ops = &rockchip_rk3066_pll_clk_ops;
		break;
	default:
		pr_warn("%s: Unknown pll type for pll clk %s\n",
			__func__, name);
	}

	pll->hw.init = &init;
	pll->type = pll_type;
	pll->reg_base = base + con_offset;
	pll->lock_offset = grf_lock_offset;
	pll->lock_shift = lock_shift;
	pll->lock = lock;

	pll_clk = clk_register(NULL, &pll->hw);
	if (IS_ERR(pll_clk)) {
		pr_err("%s: failed to register pll clock %s : %ld\n",
			__func__, name, PTR_ERR(pll_clk));
		mux_clk = pll_clk;
		goto err_pll;
	}

	/* create the mux on top of the real pll */
	pll->pll_mux_ops = &clk_mux_ops;
	pll_mux = &pll->pll_mux;

	/* the actual muxing is xin24m, pll-output, xin32k */
	pll_parents[0] = parent_names[0];
	pll_parents[1] = pll_name;
	pll_parents[2] = parent_names[1];

	init.name = name;
	init.flags = CLK_SET_RATE_PARENT;
	init.ops = pll->pll_mux_ops;
	init.parent_names = pll_parents;
	init.num_parents = ARRAY_SIZE(pll_parents);

	pll_mux->reg = base + mode_offset;
	pll_mux->shift = mode_shift;
	pll_mux->mask = PLL_MODE_MASK;
	pll_mux->flags = 0;
	pll_mux->lock = lock;
	pll_mux->hw.init = &init;

	if (pll_type == pll_rk3066)
		pll_mux->flags |= CLK_MUX_HIWORD_MASK;

	mux_clk = clk_register(NULL, &pll_mux->hw);
	if (IS_ERR(mux_clk))
		goto err_mux;

	return mux_clk;

err_mux:
	clk_unregister(pll_clk);
err_pll:
	kfree(pll);
	return mux_clk;
}