/*
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file contains the utility functions to register the pll clocks.
*/

#include <linux/errno.h>
#include "clk.h"
#include "clk-pll.h"

struct samsung_clk_pll {
	struct clk_hw		hw;
	void __iomem		*lock_reg;
	void __iomem		*con_reg;
	enum samsung_pll_type	type;
	unsigned int		rate_count;
	const struct samsung_pll_rate_table *rate_table;
};

#define to_clk_pll(_hw) container_of(_hw, struct samsung_clk_pll, hw)

static const struct samsung_pll_rate_table *samsung_get_pll_settings(
				struct samsung_clk_pll *pll, unsigned long rate)
{
	const struct samsung_pll_rate_table *rate_table = pll->rate_table;
	int i;

	for (i = 0; i < pll->rate_count; i++) {
		if (rate == rate_table[i].rate)
			return &rate_table[i];
	}

	return NULL;
}

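/*
 * Pick the highest table rate that does not exceed the requested rate;
 * if the request is below every supported entry, fall back to the table
 * minimum (the last entry, since the table is assumed to be descending).
 */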
static long samsung_pll_round_rate(struct clk_hw *hw,
			unsigned long drate, unsigned long *prate)
{
	struct samsung_clk_pll *pll = to_clk_pll(hw);
	const struct samsung_pll_rate_table *rate_table = pll->rate_table;
	int i;

	/* Assuming rate_table is in descending order */
	for (i = 0; i < pll->rate_count; i++) {
		if (drate >= rate_table[i].rate)
			return rate_table[i].rate;
	}

	/* return minimum supported value */
	return rate_table[i - 1].rate;
}

/*
 * PLL35xx Clock Type
 */
/* Maximum lock time can be 270 * PDIV cycles */
#define PLL35XX_LOCK_FACTOR	(270)

#define PLL35XX_MDIV_MASK	(0x3FF)
#define PLL35XX_PDIV_MASK	(0x3F)
#define PLL35XX_SDIV_MASK	(0x7)
#define PLL35XX_LOCK_STAT_MASK	(0x1)
#define PLL35XX_MDIV_SHIFT	(16)
#define PLL35XX_PDIV_SHIFT	(8)
#define PLL35XX_SDIV_SHIFT	(0)
#define PLL35XX_LOCK_STAT_SHIFT	(29)

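/* The divider math below gives FOUT = (MDIV * FIN) / (PDIV * 2^SDIV). */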
static unsigned long samsung_pll35xx_recalc_rate(struct clk_hw *hw,
				unsigned long parent_rate)
{
	struct samsung_clk_pll *pll = to_clk_pll(hw);
	u32 mdiv, pdiv, sdiv, pll_con;
	u64 fvco = parent_rate;

	pll_con = __raw_readl(pll->con_reg);
	mdiv = (pll_con >> PLL35XX_MDIV_SHIFT) & PLL35XX_MDIV_MASK;
	pdiv = (pll_con >> PLL35XX_PDIV_SHIFT) & PLL35XX_PDIV_MASK;
	sdiv = (pll_con >> PLL35XX_SDIV_SHIFT) & PLL35XX_SDIV_MASK;

	fvco *= mdiv;
	do_div(fvco, (pdiv << sdiv));

	return (unsigned long)fvco;
}

static inline bool samsung_pll35xx_mp_change(
		const struct samsung_pll_rate_table *rate, u32 pll_con)
{
	u32 old_mdiv, old_pdiv;

	old_mdiv = (pll_con >> PLL35XX_MDIV_SHIFT) & PLL35XX_MDIV_MASK;
	old_pdiv = (pll_con >> PLL35XX_PDIV_SHIFT) & PLL35XX_PDIV_MASK;

	return (rate->mdiv != old_mdiv || rate->pdiv != old_pdiv);
}

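/*
 * If only the S divider differs from the current setting it is written
 * directly; a change of M or P reprograms the lock time and the full PMS
 * field, then polls the lock status bit until the PLL relocks.
 */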
static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
					unsigned long prate)
{
	struct samsung_clk_pll *pll = to_clk_pll(hw);
	const struct samsung_pll_rate_table *rate;
	u32 tmp;

	/* Get required rate settings from table */
	rate = samsung_get_pll_settings(pll, drate);
	if (!rate) {
		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
			drate, __clk_get_name(hw->clk));
		return -EINVAL;
	}

	tmp = __raw_readl(pll->con_reg);

	if (!(samsung_pll35xx_mp_change(rate, tmp))) {
		/* If only the s value changes, update just the s divider */
		tmp &= ~(PLL35XX_SDIV_MASK << PLL35XX_SDIV_SHIFT);
		tmp |= rate->sdiv << PLL35XX_SDIV_SHIFT;
		__raw_writel(tmp, pll->con_reg);

		return 0;
	}

	/* Set PLL lock time. */
	__raw_writel(rate->pdiv * PLL35XX_LOCK_FACTOR,
			pll->lock_reg);

	/* Change PLL PMS values */
	tmp &= ~((PLL35XX_MDIV_MASK << PLL35XX_MDIV_SHIFT) |
			(PLL35XX_PDIV_MASK << PLL35XX_PDIV_SHIFT) |
			(PLL35XX_SDIV_MASK << PLL35XX_SDIV_SHIFT));
	tmp |= (rate->mdiv << PLL35XX_MDIV_SHIFT) |
			(rate->pdiv << PLL35XX_PDIV_SHIFT) |
			(rate->sdiv << PLL35XX_SDIV_SHIFT);
	__raw_writel(tmp, pll->con_reg);

	/* Wait until the PLL locks. */
	do {
		cpu_relax();
		tmp = __raw_readl(pll->con_reg);
	} while (!(tmp & (PLL35XX_LOCK_STAT_MASK
			<< PLL35XX_LOCK_STAT_SHIFT)));
	return 0;
}

static const struct clk_ops samsung_pll35xx_clk_ops = {
	.recalc_rate = samsung_pll35xx_recalc_rate,
	.round_rate = samsung_pll_round_rate,
	.set_rate = samsung_pll35xx_set_rate,
};

static const struct clk_ops samsung_pll35xx_clk_min_ops = {
	.recalc_rate = samsung_pll35xx_recalc_rate,
};

/*
 * PLL36xx Clock Type
 */
/* Maximum lock time can be 3000 * PDIV cycles */
#define PLL36XX_LOCK_FACTOR	(3000)

#define PLL36XX_KDIV_MASK	(0xFFFF)
#define PLL36XX_MDIV_MASK	(0x1FF)
#define PLL36XX_PDIV_MASK	(0x3F)
#define PLL36XX_SDIV_MASK	(0x7)
#define PLL36XX_MDIV_SHIFT	(16)
#define PLL36XX_PDIV_SHIFT	(8)
#define PLL36XX_SDIV_SHIFT	(0)
#define PLL36XX_KDIV_SHIFT	(0)
#define PLL36XX_LOCK_STAT_SHIFT	(29)

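/*
 * PLL36xx adds a signed 16-bit fractional term (KDIV), so the math below
 * works out to FOUT = ((MDIV + KDIV / 2^16) * FIN) / (PDIV * 2^SDIV).
 */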
static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
				unsigned long parent_rate)
{
	struct samsung_clk_pll *pll = to_clk_pll(hw);
	u32 mdiv, pdiv, sdiv, pll_con0, pll_con1;
	s16 kdiv;
	u64 fvco = parent_rate;

	pll_con0 = __raw_readl(pll->con_reg);
	pll_con1 = __raw_readl(pll->con_reg + 4);
	mdiv = (pll_con0 >> PLL36XX_MDIV_SHIFT) & PLL36XX_MDIV_MASK;
	pdiv = (pll_con0 >> PLL36XX_PDIV_SHIFT) & PLL36XX_PDIV_MASK;
	sdiv = (pll_con0 >> PLL36XX_SDIV_SHIFT) & PLL36XX_SDIV_MASK;
	kdiv = (s16)(pll_con1 & PLL36XX_KDIV_MASK);

	fvco *= (mdiv << 16) + kdiv;
	do_div(fvco, (pdiv << sdiv));
	fvco >>= 16;

	return (unsigned long)fvco;
}

static inline bool samsung_pll36xx_mpk_change(
	const struct samsung_pll_rate_table *rate, u32 pll_con0, u32 pll_con1)
{
	u32 old_mdiv, old_pdiv, old_kdiv;

	old_mdiv = (pll_con0 >> PLL36XX_MDIV_SHIFT) & PLL36XX_MDIV_MASK;
	old_pdiv = (pll_con0 >> PLL36XX_PDIV_SHIFT) & PLL36XX_PDIV_MASK;
	old_kdiv = (pll_con1 >> PLL36XX_KDIV_SHIFT) & PLL36XX_KDIV_MASK;

	return (rate->mdiv != old_mdiv || rate->pdiv != old_pdiv ||
		rate->kdiv != old_kdiv);
}

static int samsung_pll36xx_set_rate(struct clk_hw *hw, unsigned long drate,
					unsigned long parent_rate)
{
	struct samsung_clk_pll *pll = to_clk_pll(hw);
	u32 tmp, pll_con0, pll_con1;
	const struct samsung_pll_rate_table *rate;

	rate = samsung_get_pll_settings(pll, drate);
	if (!rate) {
		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
			drate, __clk_get_name(hw->clk));
		return -EINVAL;
	}

	pll_con0 = __raw_readl(pll->con_reg);
	pll_con1 = __raw_readl(pll->con_reg + 4);

	if (!(samsung_pll36xx_mpk_change(rate, pll_con0, pll_con1))) {
		/* If only the s value changes, update just the s divider */
		pll_con0 &= ~(PLL36XX_SDIV_MASK << PLL36XX_SDIV_SHIFT);
		pll_con0 |= (rate->sdiv << PLL36XX_SDIV_SHIFT);
		__raw_writel(pll_con0, pll->con_reg);

		return 0;
	}

	/* Set PLL lock time. */
	__raw_writel(rate->pdiv * PLL36XX_LOCK_FACTOR, pll->lock_reg);

	/* Change PLL PMS values */
	pll_con0 &= ~((PLL36XX_MDIV_MASK << PLL36XX_MDIV_SHIFT) |
			(PLL36XX_PDIV_MASK << PLL36XX_PDIV_SHIFT) |
			(PLL36XX_SDIV_MASK << PLL36XX_SDIV_SHIFT));
	pll_con0 |= (rate->mdiv << PLL36XX_MDIV_SHIFT) |
			(rate->pdiv << PLL36XX_PDIV_SHIFT) |
			(rate->sdiv << PLL36XX_SDIV_SHIFT);
	__raw_writel(pll_con0, pll->con_reg);

	pll_con1 &= ~(PLL36XX_KDIV_MASK << PLL36XX_KDIV_SHIFT);
	pll_con1 |= rate->kdiv << PLL36XX_KDIV_SHIFT;
	__raw_writel(pll_con1, pll->con_reg + 4);

	/* Wait until the PLL locks. */
	do {
		cpu_relax();
		tmp = __raw_readl(pll->con_reg);
	} while (!(tmp & (1 << PLL36XX_LOCK_STAT_SHIFT)));

	return 0;
}

static const struct clk_ops samsung_pll36xx_clk_ops = {
	.recalc_rate = samsung_pll36xx_recalc_rate,
	.set_rate = samsung_pll36xx_set_rate,
	.round_rate = samsung_pll_round_rate,
};

static const struct clk_ops samsung_pll36xx_clk_min_ops = {
	.recalc_rate = samsung_pll36xx_recalc_rate,
};

/*
 * PLL45xx Clock Type
 */

#define PLL45XX_MDIV_MASK	(0x3FF)
#define PLL45XX_PDIV_MASK	(0x3F)
#define PLL45XX_SDIV_MASK	(0x7)
#define PLL45XX_MDIV_SHIFT	(16)
#define PLL45XX_PDIV_SHIFT	(8)
#define PLL45XX_SDIV_SHIFT	(0)

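/*
 * Same integer divider math as PLL35xx, FOUT = (MDIV * FIN) / (PDIV * 2^SDIV),
 * except that pll_4508 uses an effective post divider of SDIV - 1.
 */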
Thomas Abraham1c4c5fe2013-03-09 17:02:48 +0900283static unsigned long samsung_pll45xx_recalc_rate(struct clk_hw *hw,
284 unsigned long parent_rate)
285{
Tomasz Figa52b06012013-08-26 19:09:04 +0200286 struct samsung_clk_pll *pll = to_clk_pll(hw);
Thomas Abraham1c4c5fe2013-03-09 17:02:48 +0900287 u32 mdiv, pdiv, sdiv, pll_con;
288 u64 fvco = parent_rate;
289
290 pll_con = __raw_readl(pll->con_reg);
291 mdiv = (pll_con >> PLL45XX_MDIV_SHIFT) & PLL45XX_MDIV_MASK;
292 pdiv = (pll_con >> PLL45XX_PDIV_SHIFT) & PLL45XX_PDIV_MASK;
293 sdiv = (pll_con >> PLL45XX_SDIV_SHIFT) & PLL45XX_SDIV_MASK;
294
295 if (pll->type == pll_4508)
296 sdiv = sdiv - 1;
297
298 fvco *= mdiv;
299 do_div(fvco, (pdiv << sdiv));
300
301 return (unsigned long)fvco;
302}
303
Thomas Abraham1c4c5fe2013-03-09 17:02:48 +0900304static const struct clk_ops samsung_pll45xx_clk_ops = {
305 .recalc_rate = samsung_pll45xx_recalc_rate,
Thomas Abraham1c4c5fe2013-03-09 17:02:48 +0900306};
307
Thomas Abraham1c4c5fe2013-03-09 17:02:48 +0900308/*
309 * PLL46xx Clock Type
310 */
311
312#define PLL46XX_MDIV_MASK (0x1FF)
313#define PLL46XX_PDIV_MASK (0x3F)
314#define PLL46XX_SDIV_MASK (0x7)
315#define PLL46XX_MDIV_SHIFT (16)
316#define PLL46XX_PDIV_SHIFT (8)
317#define PLL46XX_SDIV_SHIFT (0)
318
319#define PLL46XX_KDIV_MASK (0xFFFF)
320#define PLL4650C_KDIV_MASK (0xFFF)
321#define PLL46XX_KDIV_SHIFT (0)
322
323struct samsung_clk_pll46xx {
324 struct clk_hw hw;
325 enum pll46xx_type type;
326 const void __iomem *con_reg;
327};
328
329#define to_clk_pll46xx(_hw) container_of(_hw, struct samsung_clk_pll46xx, hw)
330
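/*
 * Fractional PLL: FOUT = ((MDIV + KDIV / 2^shift) * FIN) / (PDIV * 2^SDIV),
 * where the shift is 16 bits on pll_4600 and 10 bits otherwise, and
 * pll_4650c uses a narrower KDIV mask.
 */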
static unsigned long samsung_pll46xx_recalc_rate(struct clk_hw *hw,
				unsigned long parent_rate)
{
	struct samsung_clk_pll46xx *pll = to_clk_pll46xx(hw);
	u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1, shift;
	u64 fvco = parent_rate;

	pll_con0 = __raw_readl(pll->con_reg);
	pll_con1 = __raw_readl(pll->con_reg + 4);
	mdiv = (pll_con0 >> PLL46XX_MDIV_SHIFT) & PLL46XX_MDIV_MASK;
	pdiv = (pll_con0 >> PLL46XX_PDIV_SHIFT) & PLL46XX_PDIV_MASK;
	sdiv = (pll_con0 >> PLL46XX_SDIV_SHIFT) & PLL46XX_SDIV_MASK;
	kdiv = pll->type == pll_4650c ? pll_con1 & PLL4650C_KDIV_MASK :
					pll_con1 & PLL46XX_KDIV_MASK;

	shift = pll->type == pll_4600 ? 16 : 10;
	fvco *= (mdiv << shift) + kdiv;
	do_div(fvco, (pdiv << sdiv));
	fvco >>= shift;

	return (unsigned long)fvco;
}

static const struct clk_ops samsung_pll46xx_clk_ops = {
	.recalc_rate = samsung_pll46xx_recalc_rate,
};

struct clk * __init samsung_clk_register_pll46xx(const char *name,
			const char *pname, const void __iomem *con_reg,
			enum pll46xx_type type)
{
	struct samsung_clk_pll46xx *pll;
	struct clk *clk;
	struct clk_init_data init;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll) {
		pr_err("%s: could not allocate pll clk %s\n", __func__, name);
		return NULL;
	}

	init.name = name;
	init.ops = &samsung_pll46xx_clk_ops;
	init.flags = CLK_GET_RATE_NOCACHE;
	init.parent_names = &pname;
	init.num_parents = 1;

	pll->hw.init = &init;
	pll->con_reg = con_reg;
	pll->type = type;

	clk = clk_register(NULL, &pll->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register pll clock %s\n", __func__,
				name);
		kfree(pll);
	}

	if (clk_register_clkdev(clk, name, NULL))
		pr_err("%s: failed to register lookup for %s", __func__, name);

	return clk;
}

/*
 * PLL6552 Clock Type
 */

#define PLL6552_MDIV_MASK	0x3ff
#define PLL6552_PDIV_MASK	0x3f
#define PLL6552_SDIV_MASK	0x7
#define PLL6552_MDIV_SHIFT	16
#define PLL6552_PDIV_SHIFT	8
#define PLL6552_SDIV_SHIFT	0

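/* Integer PLL, same math as PLL35xx: FOUT = (MDIV * FIN) / (PDIV * 2^SDIV). */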
static unsigned long samsung_pll6552_recalc_rate(struct clk_hw *hw,
						unsigned long parent_rate)
{
	struct samsung_clk_pll *pll = to_clk_pll(hw);
	u32 mdiv, pdiv, sdiv, pll_con;
	u64 fvco = parent_rate;

	pll_con = __raw_readl(pll->con_reg);
	mdiv = (pll_con >> PLL6552_MDIV_SHIFT) & PLL6552_MDIV_MASK;
	pdiv = (pll_con >> PLL6552_PDIV_SHIFT) & PLL6552_PDIV_MASK;
	sdiv = (pll_con >> PLL6552_SDIV_SHIFT) & PLL6552_SDIV_MASK;

	fvco *= mdiv;
	do_div(fvco, (pdiv << sdiv));

	return (unsigned long)fvco;
}

static const struct clk_ops samsung_pll6552_clk_ops = {
	.recalc_rate = samsung_pll6552_recalc_rate,
};

/*
 * PLL6553 Clock Type
 */

#define PLL6553_MDIV_MASK	0xff
#define PLL6553_PDIV_MASK	0x3f
#define PLL6553_SDIV_MASK	0x7
#define PLL6553_KDIV_MASK	0xffff
#define PLL6553_MDIV_SHIFT	16
#define PLL6553_PDIV_SHIFT	8
#define PLL6553_SDIV_SHIFT	0
#define PLL6553_KDIV_SHIFT	0

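/*
 * Fractional PLL, same math as PLL36xx but with an unsigned KDIV:
 * FOUT = ((MDIV + KDIV / 2^16) * FIN) / (PDIV * 2^SDIV).
 */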
static unsigned long samsung_pll6553_recalc_rate(struct clk_hw *hw,
						unsigned long parent_rate)
{
	struct samsung_clk_pll *pll = to_clk_pll(hw);
	u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1;
	u64 fvco = parent_rate;

	pll_con0 = __raw_readl(pll->con_reg);
	pll_con1 = __raw_readl(pll->con_reg + 0x4);
	mdiv = (pll_con0 >> PLL6553_MDIV_SHIFT) & PLL6553_MDIV_MASK;
	pdiv = (pll_con0 >> PLL6553_PDIV_SHIFT) & PLL6553_PDIV_MASK;
	sdiv = (pll_con0 >> PLL6553_SDIV_SHIFT) & PLL6553_SDIV_MASK;
	kdiv = (pll_con1 >> PLL6553_KDIV_SHIFT) & PLL6553_KDIV_MASK;

	fvco *= (mdiv << 16) + kdiv;
	do_div(fvco, (pdiv << sdiv));
	fvco >>= 16;

	return (unsigned long)fvco;
}

static const struct clk_ops samsung_pll6553_clk_ops = {
	.recalc_rate = samsung_pll6553_recalc_rate,
};

/*
 * PLL2550x Clock Type
 */

#define PLL2550X_R_MASK		(0x1)
#define PLL2550X_P_MASK		(0x3F)
#define PLL2550X_M_MASK		(0x3FF)
#define PLL2550X_S_MASK		(0x7)
#define PLL2550X_R_SHIFT	(20)
#define PLL2550X_P_SHIFT	(14)
#define PLL2550X_M_SHIFT	(4)
#define PLL2550X_S_SHIFT	(0)

struct samsung_clk_pll2550x {
	struct clk_hw		hw;
	const void __iomem	*reg_base;
	unsigned long		offset;
};

#define to_clk_pll2550x(_hw) container_of(_hw, struct samsung_clk_pll2550x, hw)

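/*
 * The PMS/R status word is read from reg_base + 3 * offset; when the R bit
 * is clear the output rate is reported as 0, otherwise
 * FOUT = (M * FIN) / (P * 2^S).
 */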
static unsigned long samsung_pll2550x_recalc_rate(struct clk_hw *hw,
				unsigned long parent_rate)
{
	struct samsung_clk_pll2550x *pll = to_clk_pll2550x(hw);
	u32 r, p, m, s, pll_stat;
	u64 fvco = parent_rate;

	pll_stat = __raw_readl(pll->reg_base + pll->offset * 3);
	r = (pll_stat >> PLL2550X_R_SHIFT) & PLL2550X_R_MASK;
	if (!r)
		return 0;
	p = (pll_stat >> PLL2550X_P_SHIFT) & PLL2550X_P_MASK;
	m = (pll_stat >> PLL2550X_M_SHIFT) & PLL2550X_M_MASK;
	s = (pll_stat >> PLL2550X_S_SHIFT) & PLL2550X_S_MASK;

	fvco *= m;
	do_div(fvco, (p << s));

	return (unsigned long)fvco;
}

static const struct clk_ops samsung_pll2550x_clk_ops = {
	.recalc_rate = samsung_pll2550x_recalc_rate,
};

struct clk * __init samsung_clk_register_pll2550x(const char *name,
			const char *pname, const void __iomem *reg_base,
			const unsigned long offset)
{
	struct samsung_clk_pll2550x *pll;
	struct clk *clk;
	struct clk_init_data init;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll) {
		pr_err("%s: could not allocate pll clk %s\n", __func__, name);
		return NULL;
	}

	init.name = name;
	init.ops = &samsung_pll2550x_clk_ops;
	init.flags = CLK_GET_RATE_NOCACHE;
	init.parent_names = &pname;
	init.num_parents = 1;

	pll->hw.init = &init;
	pll->reg_base = reg_base;
	pll->offset = offset;

	clk = clk_register(NULL, &pll->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register pll clock %s\n", __func__,
				name);
		kfree(pll);
	}

	if (clk_register_clkdev(clk, name, NULL))
		pr_err("%s: failed to register lookup for %s", __func__, name);

	return clk;
}

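/*
 * PLLs registered without a rate table fall back to the *_min_ops variants,
 * which only provide recalc_rate, so their rate is effectively read-only.
 */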
static void __init _samsung_clk_register_pll(struct samsung_pll_clock *pll_clk,
						void __iomem *base)
{
	struct samsung_clk_pll *pll;
	struct clk *clk;
	struct clk_init_data init;
	int ret, len;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll) {
		pr_err("%s: could not allocate pll clk %s\n",
			__func__, pll_clk->name);
		return;
	}

	init.name = pll_clk->name;
	init.flags = pll_clk->flags;
	init.parent_names = &pll_clk->parent_name;
	init.num_parents = 1;

	if (pll_clk->rate_table) {
		/* find count of rates in rate_table */
		for (len = 0; pll_clk->rate_table[len].rate != 0; )
			len++;

		pll->rate_count = len;
		pll->rate_table = kmemdup(pll_clk->rate_table,
					pll->rate_count *
					sizeof(struct samsung_pll_rate_table),
					GFP_KERNEL);
		WARN(!pll->rate_table,
			"%s: could not allocate rate table for %s\n",
			__func__, pll_clk->name);
	}

	switch (pll_clk->type) {
	/* clk_ops for 35xx and 2550 are similar */
	case pll_35xx:
	case pll_2550:
		if (!pll->rate_table)
			init.ops = &samsung_pll35xx_clk_min_ops;
		else
			init.ops = &samsung_pll35xx_clk_ops;
		break;
	case pll_4500:
	case pll_4502:
	case pll_4508:
		init.ops = &samsung_pll45xx_clk_ops;
		break;
	/* clk_ops for 36xx and 2650 are similar */
	case pll_36xx:
	case pll_2650:
		if (!pll->rate_table)
			init.ops = &samsung_pll36xx_clk_min_ops;
		else
			init.ops = &samsung_pll36xx_clk_ops;
		break;
	case pll_6552:
		init.ops = &samsung_pll6552_clk_ops;
		break;
	case pll_6553:
		init.ops = &samsung_pll6553_clk_ops;
		break;
	default:
		pr_warn("%s: Unknown pll type for pll clk %s\n",
			__func__, pll_clk->name);
	}

	pll->hw.init = &init;
	pll->type = pll_clk->type;
	pll->lock_reg = base + pll_clk->lock_offset;
	pll->con_reg = base + pll_clk->con_offset;

	clk = clk_register(NULL, &pll->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register pll clock %s : %ld\n",
			__func__, pll_clk->name, PTR_ERR(clk));
		kfree(pll);
		return;
	}

	samsung_clk_add_lookup(clk, pll_clk->id);

	if (!pll_clk->alias)
		return;

	ret = clk_register_clkdev(clk, pll_clk->alias, pll_clk->dev_name);
	if (ret)
		pr_err("%s: failed to register lookup for %s : %d",
			__func__, pll_clk->name, ret);
}

void __init samsung_clk_register_pll(struct samsung_pll_clock *pll_list,
				unsigned int nr_pll, void __iomem *base)
{
	int cnt;

	for (cnt = 0; cnt < nr_pll; cnt++)
		_samsung_clk_register_pll(&pll_list[cnt], base);
}