/*
 * Copyright 2013 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * clock driver for Freescale QorIQ SoCs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/fsl/guts.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/slab.h>

#define PLL_DIV1	0
#define PLL_DIV2	1
#define PLL_DIV3	2
#define PLL_DIV4	3

#define PLATFORM_PLL	0
#define CGA_PLL1	1
#define CGA_PLL2	2
#define CGA_PLL3	3
#define CGA_PLL4	4	/* only on clockgen-1.0, which lacks CGB */
#define CGB_PLL1	4
#define CGB_PLL2	5

struct clockgen_pll_div {
	struct clk *clk;
	char name[32];
};

struct clockgen_pll {
	struct clockgen_pll_div div[4];
};

#define CLKSEL_VALID	1
#define CLKSEL_80PCT	2	/* Only allowed if PLL <= 80% of max cpu freq */

struct clockgen_sourceinfo {
	u32 flags;	/* CLKSEL_xxx */
	int pll;	/* CGx_PLLn */
	int div;	/* PLL_DIVn */
};

#define NUM_MUX_PARENTS	16

struct clockgen_muxinfo {
	struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS];
};

#define NUM_HWACCEL	5
#define NUM_CMUX	8

struct clockgen;

/*
 * If this flag is set, the cmux frequency must be at least the platform
 * PLL frequency; if not set, it must be at least platform PLL/2.
 */
#define CG_CMUX_GE_PLAT		1

#define CG_PLL_8BIT		2	/* PLLCnGSR[CFG] is 8 bits, not 6 */
#define CG_VER3			4	/* version 3 cg: reg layout different */
#define CG_LITTLE_ENDIAN	8

struct clockgen_chipinfo {
	const char *compat, *guts_compat;
	const struct clockgen_muxinfo *cmux_groups[2];
	const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
	void (*init_periph)(struct clockgen *cg);
	int cmux_to_group[NUM_CMUX];	/* -1 terminates if fewer than NUM_CMUX */
	u32 pll_mask;	/* 1 << n bit set if PLL n is valid */
	u32 flags;	/* CG_xxx */
};

struct clockgen {
	struct device_node *node;
	void __iomem *regs;
	struct clockgen_chipinfo info;	/* mutable copy */
	struct clk *sysclk;
	struct clockgen_pll pll[6];
	struct clk *cmux[NUM_CMUX];
	struct clk *hwaccel[NUM_HWACCEL];
	struct clk *fman[2];
	struct ccsr_guts __iomem *guts;
};

static struct clockgen clockgen;

static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg)
{
	if (cg->info.flags & CG_LITTLE_ENDIAN)
		iowrite32(val, reg);
	else
		iowrite32be(val, reg);
}

static u32 cg_in(struct clockgen *cg, u32 __iomem *reg)
{
	u32 val;

	if (cg->info.flags & CG_LITTLE_ENDIAN)
		val = ioread32(reg);
	else
		val = ioread32be(reg);

	return val;
}

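/*
 * Per-SoC clock-select tables.  The array index is the CLKSEL value that
 * gets programmed into the mux, and each entry names the PLL/divider
 * that value selects; empty entries are invalid selections.
 */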
static const struct clockgen_muxinfo p2041_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p2041_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		[8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		[9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		[12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 },
		[13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1040_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo clockgen2_cmux_cga = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cga12 = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
	{
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa1 = {
	{
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t1023_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t1023_hwa2 = {
	{
		[6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t2080_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t2080_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa1 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa4 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t4240_hwa5 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
	},
};

#define RCWSR7_FM1_CLK_SEL	0x40000000
#define RCWSR7_FM2_CLK_SEL	0x20000000
#define RCWSR7_HWA_ASYNC_DIV	0x04000000

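/*
 * The *_init_periph() callbacks below pick the FMan clock parents,
 * either from the RCWSR7 bits above or from the hwaccel muxes.
 */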
static void __init p2041_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p4080_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5020_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5040_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init t1023_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[1];
}

static void __init t1040_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk;
}

static void __init t2080_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[0];
}

static void __init t4240_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[3];
	cg->fman[1] = cg->hwaccel[4];
}

static const struct clockgen_chipinfo chipinfo[] = {
	{
		.compat = "fsl,b4420-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,b4860-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1021a-clockgen",
		.cmux_groups = {
			&t1023_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x03,
	},
	{
		.compat = "fsl,ls1043a-clockgen",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.hwaccel = {
			&ls1043a_hwa1, &ls1043a_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls2080a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x37,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,p2041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p3041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p4080-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p4080_init_periph,
		.cmux_groups = {
			&p4080_cmux_grp1, &p4080_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 0, 0, 1, 1, 1, 1
		},
		.pll_mask = 0x1f,
	},
	{
		.compat = "fsl,p5020-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p5020_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p5040-clockgen",
		.guts_compat = "fsl,p5040-device-config",
		.init_periph = p5040_init_periph,
		.cmux_groups = {
			&p5040_cmux_grp1, &p5040_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x0f,
	},
	{
		.compat = "fsl,t1023-clockgen",
		.guts_compat = "fsl,t1023-device-config",
		.init_periph = t1023_init_periph,
		.cmux_groups = {
			&t1023_cmux
		},
		.hwaccel = {
			&t1023_hwa1, &t1023_hwa2
		},
		.cmux_to_group = {
			0, 0, -1
		},
		.pll_mask = 0x03,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t1040-clockgen",
		.guts_compat = "fsl,t1040-device-config",
		.init_periph = t1040_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.cmux_to_group = {
			0, 0, 0, 0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t2080-clockgen",
		.guts_compat = "fsl,t2080-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.hwaccel = {
			&t2080_hwa1, &t2080_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t4240-clockgen",
		.guts_compat = "fsl,t4240-device-config",
		.init_periph = t4240_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5
		},
		.cmux_to_group = {
			0, 0, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{},
};

struct mux_hwclock {
	struct clk_hw hw;
	struct clockgen *cg;
	const struct clockgen_muxinfo *info;
	u32 __iomem *reg;
	u8 parent_to_clksel[NUM_MUX_PARENTS];
	s8 clksel_to_parent[NUM_MUX_PARENTS];
	int num_parents;
};

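/*
 * The clock-select field occupies bits 30:27 of each mux control
 * register, per CLKSEL_MASK/CLKSEL_SHIFT below.
 */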
#define to_mux_hwclock(p)	container_of(p, struct mux_hwclock, hw)
#define CLKSEL_MASK		0x78000000
#define CLKSEL_SHIFT		27

static int mux_set_parent(struct clk_hw *hw, u8 idx)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;

	if (idx >= hwc->num_parents)
		return -EINVAL;

	clksel = hwc->parent_to_clksel[idx];
	cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg);

	return 0;
}

static u8 mux_get_parent(struct clk_hw *hw)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;
	s8 ret;

	clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;

	ret = hwc->clksel_to_parent[clksel];
	if (ret < 0) {
		pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg);
		return 0;
	}

	return ret;
}

static const struct clk_ops cmux_ops = {
	.get_parent = mux_get_parent,
	.set_parent = mux_set_parent,
};

/*
 * Don't allow setting for now, as the clock options haven't been
 * sanitized for additional restrictions.
 */
static const struct clk_ops hwaccel_ops = {
	.get_parent = mux_get_parent,
};

static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg,
						  struct mux_hwclock *hwc,
						  int idx)
{
	int pll, div;

	if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID))
		return NULL;

	pll = hwc->info->clksel[idx].pll;
	div = hwc->info->clksel[idx].div;

	return &cg->pll[pll].div[div];
}

static struct clk * __init create_mux_common(struct clockgen *cg,
					     struct mux_hwclock *hwc,
					     const struct clk_ops *ops,
					     unsigned long min_rate,
					     unsigned long pct80_rate,
					     const char *fmt, int idx)
{
	struct clk_init_data init = {};
	struct clk *clk;
	const struct clockgen_pll_div *div;
	const char *parent_names[NUM_MUX_PARENTS];
	char name[32];
	int i, j;

	snprintf(name, sizeof(name), fmt, idx);

	for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) {
		unsigned long rate;

		hwc->clksel_to_parent[i] = -1;

		div = get_pll_div(cg, hwc, i);
		if (!div)
			continue;

		rate = clk_get_rate(div->clk);

		if (hwc->info->clksel[i].flags & CLKSEL_80PCT &&
		    rate > pct80_rate)
			continue;
		if (rate < min_rate)
			continue;

		parent_names[j] = div->name;
		hwc->parent_to_clksel[j] = i;
		hwc->clksel_to_parent[i] = j;
		j++;
	}

	init.name = name;
	init.ops = ops;
	init.parent_names = parent_names;
	init.num_parents = hwc->num_parents = j;
	init.flags = 0;
	hwc->hw.init = &init;
	hwc->cg = cg;

	clk = clk_register(NULL, &hwc->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));
		kfree(hwc);
		return NULL;
	}

	return clk;
}

static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;
	const struct clockgen_pll_div *div;
	unsigned long plat_rate, min_rate;
	u64 pct80_rate;
	u32 clksel;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	/* On VER3 clockgens the core-cluster mux registers start at 0x70000 */
	if (cg->info.flags & CG_VER3)
		hwc->reg = cg->regs + 0x70000 + 0x20 * idx;
	else
		hwc->reg = cg->regs + 0x20 * idx;

	hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];

	/*
	 * Find the rate for the default clksel, and treat it as the
	 * maximum rated core frequency.  If this is an incorrect
	 * assumption, certain clock options (possibly including the
	 * default clksel) may be inappropriately excluded on certain
	 * chips.
	 */
	clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
	div = get_pll_div(cg, hwc, clksel);
	if (!div) {
		kfree(hwc);
		return NULL;
	}

	pct80_rate = clk_get_rate(div->clk);
	pct80_rate *= 8;
	do_div(pct80_rate, 10);

	plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);

	if (cg->info.flags & CG_CMUX_GE_PLAT)
		min_rate = plat_rate;
	else
		min_rate = plat_rate / 2;

	return create_mux_common(cg, hwc, &cmux_ops, min_rate,
				 pct80_rate, "cg-cmux%d", idx);
}

static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	hwc->reg = cg->regs + 0x20 * idx + 0x10;
	hwc->info = cg->info.hwaccel[idx];

	return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0,
				 "cg-hwaccel%d", idx);
}

static void __init create_muxes(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) {
		if (cg->info.cmux_to_group[i] < 0)
			break;
		if (cg->info.cmux_to_group[i] >=
		    ARRAY_SIZE(cg->info.cmux_groups)) {
			WARN_ON_ONCE(1);
			continue;
		}

		cg->cmux[i] = create_one_cmux(cg, i);
	}

	for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) {
		if (!cg->info.hwaccel[i])
			continue;

		cg->hwaccel[i] = create_one_hwaccel(cg, i);
	}
}

static void __init clockgen_init(struct device_node *np);

/* Legacy nodes may get probed before the parent clockgen node */
static void __init legacy_init_clockgen(struct device_node *np)
{
	if (!clockgen.node)
		clockgen_init(of_get_parent(np));
}

/* Legacy node */
static void __init core_mux_init(struct device_node *np)
{
	struct clk *clk;
	struct resource res;
	int idx, rc;

	legacy_init_clockgen(np);

	if (of_address_to_resource(np, 0, &res))
		return;

	/* cmux control registers are spaced 0x20 apart */
	idx = (res.start & 0xf0) >> 5;
	clk = clockgen.cmux[idx];

	rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, rc);
		return;
	}
}

static struct clk __init
*sysclk_from_fixed(struct device_node *node, const char *name)
{
	u32 rate;

	if (of_property_read_u32(node, "clock-frequency", &rate))
		return ERR_PTR(-ENODEV);

	return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}

static struct clk *sysclk_from_parent(const char *name)
{
	struct clk *clk;
	const char *parent_name;

	clk = of_clk_get(clockgen.node, 0);
	if (IS_ERR(clk))
		return clk;

	/* Register the input clock under the desired name. */
	parent_name = __clk_get_name(clk);
	clk = clk_register_fixed_factor(NULL, name, parent_name,
					0, 1, 1);
	if (IS_ERR(clk))
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));

	return clk;
}

static struct clk * __init create_sysclk(const char *name)
{
	struct device_node *sysclk;
	struct clk *clk;

	clk = sysclk_from_fixed(clockgen.node, name);
	if (!IS_ERR(clk))
		return clk;

	clk = sysclk_from_parent(name);
	if (!IS_ERR(clk))
		return clk;

	sysclk = of_get_child_by_name(clockgen.node, "sysclk");
	if (sysclk) {
		clk = sysclk_from_fixed(sysclk, name);
		if (!IS_ERR(clk))
			return clk;
	}

	pr_err("%s: No input clock\n", __func__);
	return NULL;
}

/* Legacy node */
static void __init sysclk_init(struct device_node *node)
{
	struct clk *clk;

	legacy_init_clockgen(node);

	clk = clockgen.sysclk;
	if (clk)
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
}

#define PLL_KILL BIT(31)

static void __init create_one_pll(struct clockgen *cg, int idx)
{
	u32 __iomem *reg;
	u32 mult;
	struct clockgen_pll *pll = &cg->pll[idx];
	int i;

	if (!(cg->info.pll_mask & (1 << idx)))
		return;

	if (cg->info.flags & CG_VER3) {
		switch (idx) {
		case PLATFORM_PLL:
			reg = cg->regs + 0x60080;
			break;
		case CGA_PLL1:
			reg = cg->regs + 0x80;
			break;
		case CGA_PLL2:
			reg = cg->regs + 0xa0;
			break;
		case CGB_PLL1:
			reg = cg->regs + 0x10080;
			break;
		case CGB_PLL2:
			reg = cg->regs + 0x100a0;
			break;
		default:
			WARN_ONCE(1, "index %d\n", idx);
			return;
		}
	} else {
		if (idx == PLATFORM_PLL)
			reg = cg->regs + 0xc00;
		else
			reg = cg->regs + 0x800 + 0x20 * (idx - 1);
	}

	/* Read the PLL multiplier (ratio of the PLL output to sysclk) */
	mult = cg_in(cg, reg);

	/* Check if this PLL is disabled */
	if (mult & PLL_KILL) {
		pr_debug("%s(): pll %p disabled\n", __func__, reg);
		return;
	}

	/* The ratio field is 8 bits wide on newer parts, 6 bits on older ones */
	if ((cg->info.flags & CG_VER3) ||
	    ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL))
		mult = (mult & GENMASK(8, 1)) >> 1;
	else
		mult = (mult & GENMASK(6, 1)) >> 1;

	for (i = 0; i < ARRAY_SIZE(pll->div); i++) {
		struct clk *clk;

		snprintf(pll->div[i].name, sizeof(pll->div[i].name),
			 "cg-pll%d-div%d", idx, i + 1);

		clk = clk_register_fixed_factor(NULL,
				pll->div[i].name, "cg-sysclk", 0, mult, i + 1);
		if (IS_ERR(clk)) {
			pr_err("%s: %s: register failed %ld\n",
			       __func__, pll->div[i].name, PTR_ERR(clk));
			continue;
		}

		pll->div[i].clk = clk;
	}
}

static void __init create_plls(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->pll); i++)
		create_one_pll(cg, i);
}

static void __init legacy_pll_init(struct device_node *np, int idx)
{
	struct clockgen_pll *pll;
	struct clk_onecell_data *onecell_data;
	struct clk **subclks;
	int count, rc;

	legacy_init_clockgen(np);

	pll = &clockgen.pll[idx];
	count = of_property_count_strings(np, "clock-output-names");

	BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4);
	subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL);
	if (!subclks)
		return;

	onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
	if (!onecell_data)
		goto err_clks;

	if (count <= 3) {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[3].clk;
	} else {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[2].clk;
		subclks[3] = pll->div[3].clk;
	}

	onecell_data->clks = subclks;
	onecell_data->clk_num = count;

	rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, rc);
		goto err_cell;
	}

	return;
err_cell:
	kfree(onecell_data);
err_clks:
	kfree(subclks);
}

/* Legacy node */
static void __init pltfrm_pll_init(struct device_node *np)
{
	legacy_pll_init(np, PLATFORM_PLL);
}

/* Legacy node */
static void __init core_pll_init(struct device_node *np)
{
	struct resource res;
	int idx;

	if (of_address_to_resource(np, 0, &res))
		return;

	if ((res.start & 0xfff) == 0xc00) {
		/*
		 * ls1021a devtree labels the platform PLL
		 * with the core PLL compatible
		 */
		pltfrm_pll_init(np);
	} else {
		idx = (res.start & 0xf0) >> 5;
		legacy_pll_init(np, CGA_PLL1 + idx);
	}
}

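/*
 * Clock specifier layout accepted by clockgen_clk_get() below
 * (<&clockgen type index> in the device tree):
 *   type 0: sysclk (index must be 0)
 *   type 1: cmux <index>
 *   type 2: hwaccel <index>
 *   type 3: fman <index>
 *   type 4: platform PLL divider <index> (0 = /1, 1 = /2, ...)
 */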
static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
{
	struct clockgen *cg = data;
	struct clk *clk;
	struct clockgen_pll *pll;
	u32 type, idx;

	if (clkspec->args_count < 2) {
		pr_err("%s: insufficient phandle args\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	type = clkspec->args[0];
	idx = clkspec->args[1];

	switch (type) {
	case 0:
		if (idx != 0)
			goto bad_args;
		clk = cg->sysclk;
		break;
	case 1:
		if (idx >= ARRAY_SIZE(cg->cmux))
			goto bad_args;
		clk = cg->cmux[idx];
		break;
	case 2:
		if (idx >= ARRAY_SIZE(cg->hwaccel))
			goto bad_args;
		clk = cg->hwaccel[idx];
		break;
	case 3:
		if (idx >= ARRAY_SIZE(cg->fman))
			goto bad_args;
		clk = cg->fman[idx];
		break;
	case 4:
		pll = &cg->pll[PLATFORM_PLL];
		if (idx >= ARRAY_SIZE(pll->div))
			goto bad_args;
		clk = pll->div[idx].clk;
		break;
	default:
		goto bad_args;
	}

	if (!clk)
		return ERR_PTR(-ENOENT);
	return clk;

bad_args:
	pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx);
	return ERR_PTR(-EINVAL);
}

#ifdef CONFIG_PPC
#include <asm/mpc85xx.h>

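/*
 * SVRs of SoC revisions affected by erratum A-004510.  clockgen_init()
 * sets CG_CMUX_GE_PLAT on these parts so that no cmux option below the
 * platform PLL rate is ever offered.
 */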
static const u32 a4510_svrs[] __initconst = {
	(SVR_P2040 << 8) | 0x10,	/* P2040 1.0 */
	(SVR_P2040 << 8) | 0x11,	/* P2040 1.1 */
	(SVR_P2041 << 8) | 0x10,	/* P2041 1.0 */
	(SVR_P2041 << 8) | 0x11,	/* P2041 1.1 */
	(SVR_P3041 << 8) | 0x10,	/* P3041 1.0 */
	(SVR_P3041 << 8) | 0x11,	/* P3041 1.1 */
	(SVR_P4040 << 8) | 0x20,	/* P4040 2.0 */
	(SVR_P4080 << 8) | 0x20,	/* P4080 2.0 */
	(SVR_P5010 << 8) | 0x10,	/* P5010 1.0 */
	(SVR_P5010 << 8) | 0x20,	/* P5010 2.0 */
	(SVR_P5020 << 8) | 0x10,	/* P5020 1.0 */
	(SVR_P5021 << 8) | 0x10,	/* P5021 1.0 */
	(SVR_P5040 << 8) | 0x10,	/* P5040 1.0 */
};

#define SVR_SECURITY	0x80000	/* The Security (E) bit */

static bool __init has_erratum_a4510(void)
{
	u32 svr = mfspr(SPRN_SVR);
	int i;

	svr &= ~SVR_SECURITY;

	for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) {
		if (svr == a4510_svrs[i])
			return true;
	}

	return false;
}
#else
static bool __init has_erratum_a4510(void)
{
	return false;
}
#endif

static void __init clockgen_init(struct device_node *np)
{
	int i, ret;
	bool is_old_ls1021a = false;

	/* May have already been called by a legacy probe */
	if (clockgen.node)
		return;

	clockgen.node = np;
	clockgen.regs = of_iomap(np, 0);
	if (!clockgen.regs &&
	    of_device_is_compatible(of_root, "fsl,ls1021a")) {
		/* Compatibility hack for old, broken device trees */
		clockgen.regs = ioremap(0x1ee1000, 0x1000);
		is_old_ls1021a = true;
	}
	if (!clockgen.regs) {
		pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(chipinfo); i++) {
		if (of_device_is_compatible(np, chipinfo[i].compat))
			break;
		if (is_old_ls1021a &&
		    !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen"))
			break;
	}

	if (i == ARRAY_SIZE(chipinfo)) {
		pr_err("%s: unknown clockgen node %s\n", __func__,
		       np->full_name);
		goto err;
	}
	clockgen.info = chipinfo[i];

	if (clockgen.info.guts_compat) {
		struct device_node *guts;

		guts = of_find_compatible_node(NULL, NULL,
					       clockgen.info.guts_compat);
		if (guts) {
			clockgen.guts = of_iomap(guts, 0);
			if (!clockgen.guts) {
				pr_err("%s: Couldn't map %s regs\n", __func__,
				       guts->full_name);
			}
		}
	}

	if (has_erratum_a4510())
		clockgen.info.flags |= CG_CMUX_GE_PLAT;

	clockgen.sysclk = create_sysclk("cg-sysclk");
	create_plls(&clockgen);
	create_muxes(&clockgen);

	if (clockgen.info.init_periph)
		clockgen.info.init_periph(&clockgen);

	ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen);
	if (ret) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, ret);
	}

	return;
err:
	iounmap(clockgen.regs);
	clockgen.regs = NULL;
}

CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);

/* Legacy nodes */
CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);