/*
 * Copyright 2013 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * clock driver for Freescale QorIQ SoCs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/fsl/guts.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/slab.h>

#define PLL_DIV1	0
#define PLL_DIV2	1
#define PLL_DIV3	2
#define PLL_DIV4	3

#define PLATFORM_PLL	0
#define CGA_PLL1	1
#define CGA_PLL2	2
#define CGA_PLL3	3
#define CGA_PLL4	4	/* only on clockgen-1.0, which lacks CGB */
#define CGB_PLL1	4
#define CGB_PLL2	5

struct clockgen_pll_div {
	struct clk *clk;
	char name[32];
};

struct clockgen_pll {
	struct clockgen_pll_div div[4];
};

#define CLKSEL_VALID	1
#define CLKSEL_80PCT	2	/* Only allowed if PLL <= 80% of max cpu freq */

struct clockgen_sourceinfo {
	u32 flags;	/* CLKSEL_xxx */
	int pll;	/* CGx_PLLn */
	int div;	/* PLL_DIVn */
};

#define NUM_MUX_PARENTS	16

struct clockgen_muxinfo {
	struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS];
};

#define NUM_HWACCEL	5
#define NUM_CMUX	8

struct clockgen;

/*
 * If this flag is set, the cmux frequency must be at least the platform
 * PLL frequency; if it is not set, the cmux frequency must be at least
 * half the platform PLL frequency.
 */
#define CG_CMUX_GE_PLAT	1
#define CG_PLL_8BIT	2	/* PLLCnGSR[CFG] is 8 bits, not 6 */

struct clockgen_chipinfo {
	const char *compat, *guts_compat;
	const struct clockgen_muxinfo *cmux_groups[2];
	const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
	void (*init_periph)(struct clockgen *cg);
	int cmux_to_group[NUM_CMUX];	/* -1 terminates if fewer than NUM_CMUX */
	u32 pll_mask;	/* 1 << n bit set if PLL n is valid */
	u32 flags;	/* CG_xxx */
};

struct clockgen {
	struct device_node *node;
	void __iomem *regs;
	struct clockgen_chipinfo info;	/* mutable copy */
	struct clk *sysclk;
	struct clockgen_pll pll[6];
	struct clk *cmux[NUM_CMUX];
	struct clk *hwaccel[NUM_HWACCEL];
	struct clk *fman[2];
	struct ccsr_guts __iomem *guts;
};

static struct clockgen clockgen;

static const struct clockgen_muxinfo p2041_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p2041_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		[8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		[9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		[12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 },
		[13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1040_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};


static const struct clockgen_muxinfo clockgen2_cmux_cga = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cga12 = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
	{
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo t1023_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t1023_hwa2 = {
	{
		[6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t2080_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t2080_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa1 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa4 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t4240_hwa5 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
	},
};

#define RCWSR7_FM1_CLK_SEL	0x40000000
#define RCWSR7_FM2_CLK_SEL	0x20000000
#define RCWSR7_HWA_ASYNC_DIV	0x04000000

static void __init p2041_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p4080_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5020_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5040_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init t1023_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[1];
}

static void __init t1040_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk;
}

static void __init t2080_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[0];
}

static void __init t4240_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[3];
	cg->fman[1] = cg->hwaccel[4];
}

static const struct clockgen_chipinfo chipinfo[] = {
	{
		.compat = "fsl,b4420-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,b4860-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1021a-clockgen",
		.cmux_groups = {
			&t1023_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x03,
	},
	{
		.compat = "fsl,p2041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p3041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p4080-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p4080_init_periph,
		.cmux_groups = {
			&p4080_cmux_grp1, &p4080_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 0, 0, 1, 1, 1, 1
		},
		.pll_mask = 0x1f,
	},
	{
		.compat = "fsl,p5020-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p5020_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p5040-clockgen",
		.guts_compat = "fsl,p5040-device-config",
		.init_periph = p5040_init_periph,
		.cmux_groups = {
			&p5040_cmux_grp1, &p5040_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x0f,
	},
	{
		.compat = "fsl,t1023-clockgen",
		.guts_compat = "fsl,t1023-device-config",
		.init_periph = t1023_init_periph,
		.cmux_groups = {
			&t1023_cmux
		},
		.hwaccel = {
			&t1023_hwa1, &t1023_hwa2
		},
		.cmux_to_group = {
			0, 0, -1
		},
		.pll_mask = 0x03,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t1040-clockgen",
		.guts_compat = "fsl,t1040-device-config",
		.init_periph = t1040_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.cmux_to_group = {
			0, 0, 0, 0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t2080-clockgen",
		.guts_compat = "fsl,t2080-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.hwaccel = {
			&t2080_hwa1, &t2080_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t4240-clockgen",
		.guts_compat = "fsl,t4240-device-config",
		.init_periph = t4240_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5
		},
		.cmux_to_group = {
			0, 0, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{},
};

struct mux_hwclock {
	struct clk_hw hw;
	struct clockgen *cg;
	const struct clockgen_muxinfo *info;
	u32 __iomem *reg;
	u8 parent_to_clksel[NUM_MUX_PARENTS];
	s8 clksel_to_parent[NUM_MUX_PARENTS];
	int num_parents;
};

#define to_mux_hwclock(p)	container_of(p, struct mux_hwclock, hw)
#define CLKSEL_MASK		0x78000000
#define CLKSEL_SHIFT		27

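/*
 * The hardware clock selector (CLKSEL) occupies bits 27-30 of each
 * big-endian mux register.  Since not every selector value is a valid
 * option, parent_to_clksel[] and clksel_to_parent[] translate between
 * the dense parent indices used by the clk framework and the raw
 * hardware selector values.
 */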
static int mux_set_parent(struct clk_hw *hw, u8 idx)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;

	if (idx >= hwc->num_parents)
		return -EINVAL;

	clksel = hwc->parent_to_clksel[idx];
	iowrite32be((clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg);

	return 0;
}

static u8 mux_get_parent(struct clk_hw *hw)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;
	s8 ret;

	clksel = (ioread32be(hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;

	ret = hwc->clksel_to_parent[clksel];
	if (ret < 0) {
		pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg);
		return 0;
	}

	return ret;
}

static const struct clk_ops cmux_ops = {
	.get_parent = mux_get_parent,
	.set_parent = mux_set_parent,
};

/*
 * Don't allow setting for now, as the clock options haven't been
 * sanitized for additional restrictions.
 */
static const struct clk_ops hwaccel_ops = {
	.get_parent = mux_get_parent,
};

static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg,
						  struct mux_hwclock *hwc,
						  int idx)
{
	int pll, div;

	if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID))
		return NULL;

	pll = hwc->info->clksel[idx].pll;
	div = hwc->info->clksel[idx].div;

	return &cg->pll[pll].div[div];
}

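/*
 * Register one mux clock.  The parent list is built from the clksel
 * table: entries that are not CLKSEL_VALID, that exceed the 80%-of-max
 * limit when CLKSEL_80PCT is set, or that fall below min_rate are
 * skipped; the remaining PLL dividers become the selectable parents.
 */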
static struct clk * __init create_mux_common(struct clockgen *cg,
					     struct mux_hwclock *hwc,
					     const struct clk_ops *ops,
					     unsigned long min_rate,
					     unsigned long pct80_rate,
					     const char *fmt, int idx)
{
	struct clk_init_data init = {};
	struct clk *clk;
	const struct clockgen_pll_div *div;
	const char *parent_names[NUM_MUX_PARENTS];
	char name[32];
	int i, j;

	snprintf(name, sizeof(name), fmt, idx);

	for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) {
		unsigned long rate;

		hwc->clksel_to_parent[i] = -1;

		div = get_pll_div(cg, hwc, i);
		if (!div)
			continue;

		rate = clk_get_rate(div->clk);

		if (hwc->info->clksel[i].flags & CLKSEL_80PCT &&
		    rate > pct80_rate)
			continue;
		if (rate < min_rate)
			continue;

		parent_names[j] = div->name;
		hwc->parent_to_clksel[j] = i;
		hwc->clksel_to_parent[i] = j;
		j++;
	}

	init.name = name;
	init.ops = ops;
	init.parent_names = parent_names;
	init.num_parents = hwc->num_parents = j;
	init.flags = 0;
	hwc->hw.init = &init;
	hwc->cg = cg;

	clk = clk_register(NULL, &hwc->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));
		kfree(hwc);
		return NULL;
	}

	return clk;
}

static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;
	const struct clockgen_pll_div *div;
	unsigned long plat_rate, min_rate;
	u64 pct80_rate;
	u32 clksel;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	hwc->reg = cg->regs + 0x20 * idx;
	hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];

	/*
	 * Find the rate for the default clksel, and treat it as the
	 * maximum rated core frequency. If this is an incorrect
	 * assumption, certain clock options (possibly including the
	 * default clksel) may be inappropriately excluded on certain
	 * chips.
	 */
	clksel = (ioread32be(hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
	div = get_pll_div(cg, hwc, clksel);
	if (!div)
		return NULL;

	pct80_rate = clk_get_rate(div->clk);
	pct80_rate *= 8;
	do_div(pct80_rate, 10);

	plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);

	if (cg->info.flags & CG_CMUX_GE_PLAT)
		min_rate = plat_rate;
	else
		min_rate = plat_rate / 2;

	return create_mux_common(cg, hwc, &cmux_ops, min_rate,
				 pct80_rate, "cg-cmux%d", idx);
}

static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	hwc->reg = cg->regs + 0x20 * idx + 0x10;
	hwc->info = cg->info.hwaccel[idx];

	return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0,
				 "cg-hwaccel%d", idx);
}

static void __init create_muxes(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) {
		if (cg->info.cmux_to_group[i] < 0)
			break;
		if (cg->info.cmux_to_group[i] >=
		    ARRAY_SIZE(cg->info.cmux_groups)) {
			WARN_ON_ONCE(1);
			continue;
		}

		cg->cmux[i] = create_one_cmux(cg, i);
	}

	for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) {
		if (!cg->info.hwaccel[i])
			continue;

		cg->hwaccel[i] = create_one_hwaccel(cg, i);
	}
}

static void __init clockgen_init(struct device_node *np);

/* Legacy nodes may get probed before the parent clockgen node */
static void __init legacy_init_clockgen(struct device_node *np)
{
	if (!clockgen.node)
		clockgen_init(of_get_parent(np));
}

/* Legacy node */
static void __init core_mux_init(struct device_node *np)
{
	struct clk *clk;
	struct resource res;
	int idx, rc;

	legacy_init_clockgen(np);

	if (of_address_to_resource(np, 0, &res))
		return;

	idx = (res.start & 0xf0) >> 5;
	clk = clockgen.cmux[idx];

	rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, rc);
		return;
	}
}

static struct clk *sysclk_from_fixed(struct device_node *node, const char *name)
{
	u32 rate;

	if (of_property_read_u32(node, "clock-frequency", &rate))
		return ERR_PTR(-ENODEV);

	return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
}

static struct clk *sysclk_from_parent(const char *name)
{
	struct clk *clk;
	const char *parent_name;

	clk = of_clk_get(clockgen.node, 0);
	if (IS_ERR(clk))
		return clk;

	/* Register the input clock under the desired name. */
	parent_name = __clk_get_name(clk);
	clk = clk_register_fixed_factor(NULL, name, parent_name,
					0, 1, 1);
	if (IS_ERR(clk))
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));

	return clk;
}

static struct clk * __init create_sysclk(const char *name)
{
	struct device_node *sysclk;
	struct clk *clk;

	clk = sysclk_from_fixed(clockgen.node, name);
	if (!IS_ERR(clk))
		return clk;

	clk = sysclk_from_parent(name);
	if (!IS_ERR(clk))
		return clk;

	sysclk = of_get_child_by_name(clockgen.node, "sysclk");
	if (sysclk) {
		clk = sysclk_from_fixed(sysclk, name);
		if (!IS_ERR(clk))
			return clk;
	}

	pr_err("%s: No input clock\n", __func__);
	return NULL;
}

/* Legacy node */
static void __init sysclk_init(struct device_node *node)
{
	struct clk *clk;

	legacy_init_clockgen(node);

	clk = clockgen.sysclk;
	if (clk)
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
}

#define PLL_KILL BIT(31)

static void __init create_one_pll(struct clockgen *cg, int idx)
{
	u32 __iomem *reg;
	u32 mult;
	struct clockgen_pll *pll = &cg->pll[idx];
	int i;

	if (!(cg->info.pll_mask & (1 << idx)))
		return;

	if (idx == PLATFORM_PLL)
		reg = cg->regs + 0xc00;
	else
		reg = cg->regs + 0x800 + 0x20 * (idx - 1);

	/* Get the PLL multiplier from the control/status register */
	mult = ioread32be(reg);

	/* Check if this PLL is disabled */
	if (mult & PLL_KILL) {
		pr_debug("%s(): pll %p disabled\n", __func__, reg);
		return;
	}

	if ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL)
		mult = (mult & GENMASK(8, 1)) >> 1;
	else
		mult = (mult & GENMASK(6, 1)) >> 1;

	for (i = 0; i < ARRAY_SIZE(pll->div); i++) {
		struct clk *clk;

		snprintf(pll->div[i].name, sizeof(pll->div[i].name),
			 "cg-pll%d-div%d", idx, i + 1);

		clk = clk_register_fixed_factor(NULL,
				pll->div[i].name, "cg-sysclk", 0, mult, i + 1);
		if (IS_ERR(clk)) {
			pr_err("%s: %s: register failed %ld\n",
			       __func__, pll->div[i].name, PTR_ERR(clk));
			continue;
		}

		pll->div[i].clk = clk;
	}
}

static void __init create_plls(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->pll); i++)
		create_one_pll(cg, i);
}

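/*
 * Legacy bindings exposed each PLL as its own node, with
 * "clock-output-names" listing the divider outputs.  Re-export the
 * corresponding cg-pllN-divN clocks through a onecell provider so
 * consumers written against the old bindings keep working.
 */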
static void __init legacy_pll_init(struct device_node *np, int idx)
{
	struct clockgen_pll *pll;
	struct clk_onecell_data *onecell_data;
	struct clk **subclks;
	int count, rc;

	legacy_init_clockgen(np);

	pll = &clockgen.pll[idx];
	count = of_property_count_strings(np, "clock-output-names");

	BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4);
	subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL);
	if (!subclks)
		return;

	onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
	if (!onecell_data)
		goto err_clks;

	if (count <= 3) {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[3].clk;
	} else {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[2].clk;
		subclks[3] = pll->div[3].clk;
	}

	onecell_data->clks = subclks;
	onecell_data->clk_num = count;

	rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, rc);
		goto err_cell;
	}

	return;
err_cell:
	kfree(onecell_data);
err_clks:
	kfree(subclks);
}

/* Legacy node */
static void __init pltfrm_pll_init(struct device_node *np)
{
	legacy_pll_init(np, PLATFORM_PLL);
}

/* Legacy node */
static void __init core_pll_init(struct device_node *np)
{
	struct resource res;
	int idx;

	if (of_address_to_resource(np, 0, &res))
		return;

	if ((res.start & 0xfff) == 0xc00) {
		/*
		 * ls1021a devtree labels the platform PLL
		 * with the core PLL compatible
		 */
		pltfrm_pll_init(np);
	} else {
		idx = (res.start & 0xf0) >> 5;
		legacy_pll_init(np, CGA_PLL1 + idx);
	}
}

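/*
 * Clock specifier for the unified clockgen binding: the first cell
 * selects the clock type (0 = sysclk, 1 = cmux, 2 = hwaccel, 3 = fman,
 * 4 = platform PLL divider) and the second cell selects the index
 * within that type.
 */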
static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
{
	struct clockgen *cg = data;
	struct clk *clk;
	struct clockgen_pll *pll;
	u32 type, idx;

	if (clkspec->args_count < 2) {
		pr_err("%s: insufficient phandle args\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	type = clkspec->args[0];
	idx = clkspec->args[1];

	switch (type) {
	case 0:
		if (idx != 0)
			goto bad_args;
		clk = cg->sysclk;
		break;
	case 1:
		if (idx >= ARRAY_SIZE(cg->cmux))
			goto bad_args;
		clk = cg->cmux[idx];
		break;
	case 2:
		if (idx >= ARRAY_SIZE(cg->hwaccel))
			goto bad_args;
		clk = cg->hwaccel[idx];
		break;
	case 3:
		if (idx >= ARRAY_SIZE(cg->fman))
			goto bad_args;
		clk = cg->fman[idx];
		break;
	case 4:
		pll = &cg->pll[PLATFORM_PLL];
		if (idx >= ARRAY_SIZE(pll->div))
			goto bad_args;
		clk = pll->div[idx].clk;
		break;
	default:
		goto bad_args;
	}

	if (!clk)
		return ERR_PTR(-ENOENT);
	return clk;

bad_args:
	pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx);
	return ERR_PTR(-EINVAL);
}

#ifdef CONFIG_PPC
#include <asm/mpc85xx.h>

static const u32 a4510_svrs[] __initconst = {
	(SVR_P2040 << 8) | 0x10,	/* P2040 1.0 */
	(SVR_P2040 << 8) | 0x11,	/* P2040 1.1 */
	(SVR_P2041 << 8) | 0x10,	/* P2041 1.0 */
	(SVR_P2041 << 8) | 0x11,	/* P2041 1.1 */
	(SVR_P3041 << 8) | 0x10,	/* P3041 1.0 */
	(SVR_P3041 << 8) | 0x11,	/* P3041 1.1 */
	(SVR_P4040 << 8) | 0x20,	/* P4040 2.0 */
	(SVR_P4080 << 8) | 0x20,	/* P4080 2.0 */
	(SVR_P5010 << 8) | 0x10,	/* P5010 1.0 */
	(SVR_P5010 << 8) | 0x20,	/* P5010 2.0 */
	(SVR_P5020 << 8) | 0x10,	/* P5020 1.0 */
	(SVR_P5021 << 8) | 0x10,	/* P5021 1.0 */
	(SVR_P5040 << 8) | 0x10,	/* P5040 1.0 */
};

#define SVR_SECURITY	0x80000	/* The Security (E) bit */

static bool __init has_erratum_a4510(void)
{
	u32 svr = mfspr(SPRN_SVR);
	int i;

	svr &= ~SVR_SECURITY;

	for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) {
		if (svr == a4510_svrs[i])
			return true;
	}

	return false;
}
#else
static bool __init has_erratum_a4510(void)
{
	return false;
}
#endif

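/*
 * Main initialization: map the clockgen register block, identify the
 * chip from its compatible string, optionally map the guts (device
 * config) block for RCW access, then create the sysclk, PLLs, muxes,
 * and peripheral clocks before registering the clock provider.
 */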
static void __init clockgen_init(struct device_node *np)
{
	int i, ret;
	bool is_old_ls1021a = false;

	/* May have already been called by a legacy probe */
	if (clockgen.node)
		return;

	clockgen.node = np;
	clockgen.regs = of_iomap(np, 0);
	if (!clockgen.regs &&
	    of_device_is_compatible(of_root, "fsl,ls1021a")) {
		/* Compatibility hack for old, broken device trees */
		clockgen.regs = ioremap(0x1ee1000, 0x1000);
		is_old_ls1021a = true;
	}
	if (!clockgen.regs) {
		pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(chipinfo); i++) {
		if (of_device_is_compatible(np, chipinfo[i].compat))
			break;
		if (is_old_ls1021a &&
		    !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen"))
			break;
	}

	if (i == ARRAY_SIZE(chipinfo)) {
		pr_err("%s: unknown clockgen node %s\n", __func__,
		       np->full_name);
		goto err;
	}
	clockgen.info = chipinfo[i];

	if (clockgen.info.guts_compat) {
		struct device_node *guts;

		guts = of_find_compatible_node(NULL, NULL,
					       clockgen.info.guts_compat);
		if (guts) {
			clockgen.guts = of_iomap(guts, 0);
			if (!clockgen.guts) {
				pr_err("%s: Couldn't map %s regs\n", __func__,
				       guts->full_name);
			}
		}

	}

	if (has_erratum_a4510())
		clockgen.info.flags |= CG_CMUX_GE_PLAT;

	clockgen.sysclk = create_sysclk("cg-sysclk");
	create_plls(&clockgen);
	create_muxes(&clockgen);

	if (clockgen.info.init_periph)
		clockgen.info.init_periph(&clockgen);

	ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen);
	if (ret) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, ret);
	}

	return;
err:
	iounmap(clockgen.regs);
	clockgen.regs = NULL;
}

CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);

/* Legacy nodes */
CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);