/*
 * Copyright 2013 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * clock driver for Freescale QorIQ SoCs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/fsl/guts.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/slab.h>

#define PLL_DIV1	0
#define PLL_DIV2	1
#define PLL_DIV3	2
#define PLL_DIV4	3

#define PLATFORM_PLL	0
#define CGA_PLL1	1
#define CGA_PLL2	2
#define CGA_PLL3	3
#define CGA_PLL4	4	/* only on clockgen-1.0, which lacks CGB */
#define CGB_PLL1	4
#define CGB_PLL2	5

struct clockgen_pll_div {
	struct clk *clk;
	char name[32];
};

struct clockgen_pll {
	struct clockgen_pll_div div[4];
};

#define CLKSEL_VALID	1
#define CLKSEL_80PCT	2	/* Only allowed if PLL <= 80% of max cpu freq */

struct clockgen_sourceinfo {
	u32 flags;	/* CLKSEL_xxx */
	int pll;	/* CGx_PLLn */
	int div;	/* PLL_DIVn */
};

#define NUM_MUX_PARENTS	16

struct clockgen_muxinfo {
	struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS];
};

#define NUM_HWACCEL	5
#define NUM_CMUX	8

struct clockgen;

/*
 * If this flag is set, the cmux frequency must be >= the platform PLL
 * frequency; if not set, it must be >= platform PLL frequency / 2.
 */
#define CG_CMUX_GE_PLAT		1

#define CG_PLL_8BIT		2	/* PLLCnGSR[CFG] is 8 bits, not 6 */
#define CG_VER3			4	/* version 3 cg: reg layout different */
#define CG_LITTLE_ENDIAN	8

struct clockgen_chipinfo {
	const char *compat, *guts_compat;
	const struct clockgen_muxinfo *cmux_groups[2];
	const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
	void (*init_periph)(struct clockgen *cg);
	int cmux_to_group[NUM_CMUX]; /* -1 terminates if fewer than NUM_CMUX */
	u32 pll_mask;	/* 1 << n bit set if PLL n is valid */
	u32 flags;	/* CG_xxx */
};

struct clockgen {
	struct device_node *node;
	void __iomem *regs;
	struct clockgen_chipinfo info; /* mutable copy */
	struct clk *sysclk;
	struct clockgen_pll pll[6];
	struct clk *cmux[NUM_CMUX];
	struct clk *hwaccel[NUM_HWACCEL];
	struct clk *fman[2];
	struct ccsr_guts __iomem *guts;
};

static struct clockgen clockgen;

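/*
 * Clockgen register accessors; the block is accessed big-endian unless
 * the chip's info sets CG_LITTLE_ENDIAN.
 */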
static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg)
{
	if (cg->info.flags & CG_LITTLE_ENDIAN)
		iowrite32(val, reg);
	else
		iowrite32be(val, reg);
}

static u32 cg_in(struct clockgen *cg, u32 __iomem *reg)
{
	u32 val;

	if (cg->info.flags & CG_LITTLE_ENDIAN)
		val = ioread32(reg);
	else
		val = ioread32be(reg);

	return val;
}

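/*
 * In the clksel tables below, the array index is the hardware CLKSEL
 * value; each valid entry names the PLL and divider selected by that
 * value.
 */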
static const struct clockgen_muxinfo p2041_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p2041_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		[8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		[9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		[12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 },
		[13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1040_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};


static const struct clockgen_muxinfo clockgen2_cmux_cga = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cga12 = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
	{
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa1 = {
	{
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1046a_hwa1 = {
	{
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1046a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo ls1012a_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{},
		[2] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t1023_hwa2 = {
	{
		[6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t2080_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t2080_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa1 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa4 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t4240_hwa5 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
	},
};

#define RCWSR7_FM1_CLK_SEL	0x40000000
#define RCWSR7_FM2_CLK_SEL	0x20000000
#define RCWSR7_HWA_ASYNC_DIV	0x04000000

static void __init p2041_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p4080_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5020_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5040_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init t1023_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[1];
}

static void __init t1040_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk;
}

static void __init t2080_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[0];
}

static void __init t4240_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[3];
	cg->fman[1] = cg->hwaccel[4];
}

static const struct clockgen_chipinfo chipinfo[] = {
	{
		.compat = "fsl,b4420-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,b4860-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1021a-clockgen",
		.cmux_groups = {
			&t1023_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x03,
	},
	{
		.compat = "fsl,ls1043a-clockgen",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.hwaccel = {
			&ls1043a_hwa1, &ls1043a_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1046a-clockgen",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.hwaccel = {
			&ls1046a_hwa1, &ls1046a_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1012a-clockgen",
		.cmux_groups = {
			&ls1012a_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x03,
	},
	{
		.compat = "fsl,ls2080a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x37,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,p2041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p3041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p4080-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p4080_init_periph,
		.cmux_groups = {
			&p4080_cmux_grp1, &p4080_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 0, 0, 1, 1, 1, 1
		},
		.pll_mask = 0x1f,
	},
	{
		.compat = "fsl,p5020-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p5020_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p5040-clockgen",
		.guts_compat = "fsl,p5040-device-config",
		.init_periph = p5040_init_periph,
		.cmux_groups = {
			&p5040_cmux_grp1, &p5040_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x0f,
	},
	{
		.compat = "fsl,t1023-clockgen",
		.guts_compat = "fsl,t1023-device-config",
		.init_periph = t1023_init_periph,
		.cmux_groups = {
			&t1023_cmux
		},
		.hwaccel = {
			&t1023_hwa1, &t1023_hwa2
		},
		.cmux_to_group = {
			0, 0, -1
		},
		.pll_mask = 0x03,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t1040-clockgen",
		.guts_compat = "fsl,t1040-device-config",
		.init_periph = t1040_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.cmux_to_group = {
			0, 0, 0, 0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t2080-clockgen",
		.guts_compat = "fsl,t2080-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.hwaccel = {
			&t2080_hwa1, &t2080_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t4240-clockgen",
		.guts_compat = "fsl,t4240-device-config",
		.init_periph = t4240_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5
		},
		.cmux_to_group = {
			0, 0, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{},
};

struct mux_hwclock {
	struct clk_hw hw;
	struct clockgen *cg;
	const struct clockgen_muxinfo *info;
	u32 __iomem *reg;
	u8 parent_to_clksel[NUM_MUX_PARENTS];
	s8 clksel_to_parent[NUM_MUX_PARENTS];
	int num_parents;
};

#define to_mux_hwclock(p)	container_of(p, struct mux_hwclock, hw)
#define CLKSEL_MASK		0x78000000
#define CLKSEL_SHIFT		27

static int mux_set_parent(struct clk_hw *hw, u8 idx)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;

	if (idx >= hwc->num_parents)
		return -EINVAL;

	clksel = hwc->parent_to_clksel[idx];
	cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg);

	return 0;
}

static u8 mux_get_parent(struct clk_hw *hw)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;
	s8 ret;

	clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;

	ret = hwc->clksel_to_parent[clksel];
	if (ret < 0) {
		pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg);
		return 0;
	}

	return ret;
}

static const struct clk_ops cmux_ops = {
	.get_parent = mux_get_parent,
	.set_parent = mux_set_parent,
};

/*
 * Don't allow setting for now, as the clock options haven't been
 * sanitized for additional restrictions.
 */
static const struct clk_ops hwaccel_ops = {
	.get_parent = mux_get_parent,
};

static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg,
						  struct mux_hwclock *hwc,
						  int idx)
{
	int pll, div;

	if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID))
		return NULL;

	pll = hwc->info->clksel[idx].pll;
	div = hwc->info->clksel[idx].div;

	return &cg->pll[pll].div[div];
}

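/*
 * Register one mux clock.  Only clksel values that refer to a valid PLL
 * divider and satisfy the rate limits become parents; parent_to_clksel
 * and clksel_to_parent map between parent index and hardware clksel.
 */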
static struct clk * __init create_mux_common(struct clockgen *cg,
					     struct mux_hwclock *hwc,
					     const struct clk_ops *ops,
					     unsigned long min_rate,
					     unsigned long max_rate,
					     unsigned long pct80_rate,
					     const char *fmt, int idx)
{
	struct clk_init_data init = {};
	struct clk *clk;
	const struct clockgen_pll_div *div;
	const char *parent_names[NUM_MUX_PARENTS];
	char name[32];
	int i, j;

	snprintf(name, sizeof(name), fmt, idx);

	for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) {
		unsigned long rate;

		hwc->clksel_to_parent[i] = -1;

		div = get_pll_div(cg, hwc, i);
		if (!div)
			continue;

		rate = clk_get_rate(div->clk);

		if (hwc->info->clksel[i].flags & CLKSEL_80PCT &&
		    rate > pct80_rate)
			continue;
		if (rate < min_rate)
			continue;
		if (rate > max_rate)
			continue;

		parent_names[j] = div->name;
		hwc->parent_to_clksel[j] = i;
		hwc->clksel_to_parent[i] = j;
		j++;
	}

	init.name = name;
	init.ops = ops;
	init.parent_names = parent_names;
	init.num_parents = hwc->num_parents = j;
	init.flags = 0;
	hwc->hw.init = &init;
	hwc->cg = cg;

	clk = clk_register(NULL, &hwc->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));
		kfree(hwc);
		return NULL;
	}

	return clk;
}

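/*
 * Core cmux registers are 0x20 apart; on CG_VER3 parts they sit at
 * offset 0x70000 instead of the start of the clockgen block.
 */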
static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;
	const struct clockgen_pll_div *div;
	unsigned long plat_rate, min_rate;
	u64 max_rate, pct80_rate;
	u32 clksel;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	if (cg->info.flags & CG_VER3)
		hwc->reg = cg->regs + 0x70000 + 0x20 * idx;
	else
		hwc->reg = cg->regs + 0x20 * idx;

	hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];

	/*
	 * Find the rate for the default clksel, and treat it as the
	 * maximum rated core frequency.  If this is an incorrect
	 * assumption, certain clock options (possibly including the
	 * default clksel) may be inappropriately excluded on certain
	 * chips.
	 */
	clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
	div = get_pll_div(cg, hwc, clksel);
	if (!div) {
		kfree(hwc);
		return NULL;
	}

	max_rate = clk_get_rate(div->clk);
	pct80_rate = max_rate * 8;
	do_div(pct80_rate, 10);

	plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);

	if (cg->info.flags & CG_CMUX_GE_PLAT)
		min_rate = plat_rate;
	else
		min_rate = plat_rate / 2;

	return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
				 pct80_rate, "cg-cmux%d", idx);
}

static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	hwc->reg = cg->regs + 0x20 * idx + 0x10;
	hwc->info = cg->info.hwaccel[idx];

	return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
				 "cg-hwaccel%d", idx);
}

static void __init create_muxes(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) {
		if (cg->info.cmux_to_group[i] < 0)
			break;
		if (cg->info.cmux_to_group[i] >=
		    ARRAY_SIZE(cg->info.cmux_groups)) {
			WARN_ON_ONCE(1);
			continue;
		}

		cg->cmux[i] = create_one_cmux(cg, i);
	}

	for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) {
		if (!cg->info.hwaccel[i])
			continue;

		cg->hwaccel[i] = create_one_hwaccel(cg, i);
	}
}

static void __init clockgen_init(struct device_node *np);

/* Legacy nodes may get probed before the parent clockgen node */
static void __init legacy_init_clockgen(struct device_node *np)
{
	if (!clockgen.node)
		clockgen_init(of_get_parent(np));
}

/* Legacy node */
static void __init core_mux_init(struct device_node *np)
{
	struct clk *clk;
	struct resource res;
	int idx, rc;

	legacy_init_clockgen(np);

	if (of_address_to_resource(np, 0, &res))
		return;

	idx = (res.start & 0xf0) >> 5;
	clk = clockgen.cmux[idx];

	rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, rc);
		return;
	}
}

static struct clk __init
*sysclk_from_fixed(struct device_node *node, const char *name)
{
	u32 rate;

	if (of_property_read_u32(node, "clock-frequency", &rate))
		return ERR_PTR(-ENODEV);

	return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}

static struct clk *sysclk_from_parent(const char *name)
{
	struct clk *clk;
	const char *parent_name;

	clk = of_clk_get(clockgen.node, 0);
	if (IS_ERR(clk))
		return clk;

	/* Register the input clock under the desired name. */
	parent_name = __clk_get_name(clk);
	clk = clk_register_fixed_factor(NULL, name, parent_name,
					0, 1, 1);
	if (IS_ERR(clk))
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));

	return clk;
}

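/*
 * The input clock is taken from a fixed "clock-frequency" property on
 * the clockgen node, from the node's first clock phandle, or from a
 * legacy "sysclk" child node, in that order.
 */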
static struct clk * __init create_sysclk(const char *name)
{
	struct device_node *sysclk;
	struct clk *clk;

	clk = sysclk_from_fixed(clockgen.node, name);
	if (!IS_ERR(clk))
		return clk;

	clk = sysclk_from_parent(name);
	if (!IS_ERR(clk))
		return clk;

	sysclk = of_get_child_by_name(clockgen.node, "sysclk");
	if (sysclk) {
		clk = sysclk_from_fixed(sysclk, name);
		if (!IS_ERR(clk))
			return clk;
	}

	pr_err("%s: No input clock\n", __func__);
	return NULL;
}

/* Legacy node */
static void __init sysclk_init(struct device_node *node)
{
	struct clk *clk;

	legacy_init_clockgen(node);

	clk = clockgen.sysclk;
	if (clk)
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
}

#define PLL_KILL BIT(31)

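/*
 * Each PLL divider is registered as a fixed-factor clock of sysclk.
 * The ratio comes from the PLL control/status register: an 8-bit field
 * starting at bit 1 on CG_VER3 chips and for non-platform PLLs on
 * CG_PLL_8BIT chips, otherwise a 6-bit field.
 */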
static void __init create_one_pll(struct clockgen *cg, int idx)
{
	u32 __iomem *reg;
	u32 mult;
	struct clockgen_pll *pll = &cg->pll[idx];
	int i;

	if (!(cg->info.pll_mask & (1 << idx)))
		return;

	if (cg->info.flags & CG_VER3) {
		switch (idx) {
		case PLATFORM_PLL:
			reg = cg->regs + 0x60080;
			break;
		case CGA_PLL1:
			reg = cg->regs + 0x80;
			break;
		case CGA_PLL2:
			reg = cg->regs + 0xa0;
			break;
		case CGB_PLL1:
			reg = cg->regs + 0x10080;
			break;
		case CGB_PLL2:
			reg = cg->regs + 0x100a0;
			break;
		default:
			WARN_ONCE(1, "index %d\n", idx);
			return;
		}
	} else {
		if (idx == PLATFORM_PLL)
			reg = cg->regs + 0xc00;
		else
			reg = cg->regs + 0x800 + 0x20 * (idx - 1);
	}

	/* Get the multiple of PLL */
	mult = cg_in(cg, reg);

	/* Check if this PLL is disabled */
	if (mult & PLL_KILL) {
		pr_debug("%s(): pll %p disabled\n", __func__, reg);
		return;
	}

	if ((cg->info.flags & CG_VER3) ||
	    ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL))
		mult = (mult & GENMASK(8, 1)) >> 1;
	else
		mult = (mult & GENMASK(6, 1)) >> 1;

	for (i = 0; i < ARRAY_SIZE(pll->div); i++) {
		struct clk *clk;

		snprintf(pll->div[i].name, sizeof(pll->div[i].name),
			 "cg-pll%d-div%d", idx, i + 1);

		clk = clk_register_fixed_factor(NULL,
				pll->div[i].name, "cg-sysclk", 0, mult, i + 1);
		if (IS_ERR(clk)) {
			pr_err("%s: %s: register failed %ld\n",
			       __func__, pll->div[i].name, PTR_ERR(clk));
			continue;
		}

		pll->div[i].clk = clk;
	}
}

static void __init create_plls(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->pll); i++)
		create_one_pll(cg, i);
}

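/*
 * Legacy PLL nodes that list three clock-output-names expose div1, div2
 * and div4; nodes that list four expose all four dividers.
 */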
static void __init legacy_pll_init(struct device_node *np, int idx)
{
	struct clockgen_pll *pll;
	struct clk_onecell_data *onecell_data;
	struct clk **subclks;
	int count, rc;

	legacy_init_clockgen(np);

	pll = &clockgen.pll[idx];
	count = of_property_count_strings(np, "clock-output-names");

	BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4);
	subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL);
	if (!subclks)
		return;

	onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
	if (!onecell_data)
		goto err_clks;

	if (count <= 3) {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[3].clk;
	} else {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[2].clk;
		subclks[3] = pll->div[3].clk;
	}

	onecell_data->clks = subclks;
	onecell_data->clk_num = count;

	rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, rc);
		goto err_cell;
	}

	return;
err_cell:
	kfree(onecell_data);
err_clks:
	kfree(subclks);
}

/* Legacy node */
static void __init pltfrm_pll_init(struct device_node *np)
{
	legacy_pll_init(np, PLATFORM_PLL);
}

/* Legacy node */
static void __init core_pll_init(struct device_node *np)
{
	struct resource res;
	int idx;

	if (of_address_to_resource(np, 0, &res))
		return;

	if ((res.start & 0xfff) == 0xc00) {
		/*
		 * ls1021a devtree labels the platform PLL
		 * with the core PLL compatible
		 */
		pltfrm_pll_init(np);
	} else {
		idx = (res.start & 0xf0) >> 5;
		legacy_pll_init(np, CGA_PLL1 + idx);
	}
}

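/*
 * Clock specifier: <type index>.  Type 0 is sysclk, 1 a core cmux,
 * 2 a hwaccel mux, 3 an FMan clock, 4 a platform PLL divider.
 */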
static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
{
	struct clockgen *cg = data;
	struct clk *clk;
	struct clockgen_pll *pll;
	u32 type, idx;

	if (clkspec->args_count < 2) {
		pr_err("%s: insufficient phandle args\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	type = clkspec->args[0];
	idx = clkspec->args[1];

	switch (type) {
	case 0:
		if (idx != 0)
			goto bad_args;
		clk = cg->sysclk;
		break;
	case 1:
		if (idx >= ARRAY_SIZE(cg->cmux))
			goto bad_args;
		clk = cg->cmux[idx];
		break;
	case 2:
		if (idx >= ARRAY_SIZE(cg->hwaccel))
			goto bad_args;
		clk = cg->hwaccel[idx];
		break;
	case 3:
		if (idx >= ARRAY_SIZE(cg->fman))
			goto bad_args;
		clk = cg->fman[idx];
		break;
	case 4:
		pll = &cg->pll[PLATFORM_PLL];
		if (idx >= ARRAY_SIZE(pll->div))
			goto bad_args;
		clk = pll->div[idx].clk;
		break;
	default:
		goto bad_args;
	}

	if (!clk)
		return ERR_PTR(-ENOENT);
	return clk;

bad_args:
	pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx);
	return ERR_PTR(-EINVAL);
}

#ifdef CONFIG_PPC
#include <asm/mpc85xx.h>

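/*
 * SVR values (with the security bit masked off) of parts for which
 * has_erratum_a4510() returns true; clockgen_init() then sets
 * CG_CMUX_GE_PLAT so the cmux never runs below the platform PLL rate.
 */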
static const u32 a4510_svrs[] __initconst = {
	(SVR_P2040 << 8) | 0x10,	/* P2040 1.0 */
	(SVR_P2040 << 8) | 0x11,	/* P2040 1.1 */
	(SVR_P2041 << 8) | 0x10,	/* P2041 1.0 */
	(SVR_P2041 << 8) | 0x11,	/* P2041 1.1 */
	(SVR_P3041 << 8) | 0x10,	/* P3041 1.0 */
	(SVR_P3041 << 8) | 0x11,	/* P3041 1.1 */
	(SVR_P4040 << 8) | 0x20,	/* P4040 2.0 */
	(SVR_P4080 << 8) | 0x20,	/* P4080 2.0 */
	(SVR_P5010 << 8) | 0x10,	/* P5010 1.0 */
	(SVR_P5010 << 8) | 0x20,	/* P5010 2.0 */
	(SVR_P5020 << 8) | 0x10,	/* P5020 1.0 */
	(SVR_P5021 << 8) | 0x10,	/* P5021 1.0 */
	(SVR_P5040 << 8) | 0x10,	/* P5040 1.0 */
};

#define SVR_SECURITY	0x80000	/* The Security (E) bit */

static bool __init has_erratum_a4510(void)
{
	u32 svr = mfspr(SPRN_SVR);
	int i;

	svr &= ~SVR_SECURITY;

	for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) {
		if (svr == a4510_svrs[i])
			return true;
	}

	return false;
}
#else
static bool __init has_erratum_a4510(void)
{
	return false;
}
#endif

static void __init clockgen_init(struct device_node *np)
{
	int i, ret;
	bool is_old_ls1021a = false;

	/* May have already been called by a legacy probe */
	if (clockgen.node)
		return;

	clockgen.node = np;
	clockgen.regs = of_iomap(np, 0);
	if (!clockgen.regs &&
	    of_device_is_compatible(of_root, "fsl,ls1021a")) {
		/* Compatibility hack for old, broken device trees */
		clockgen.regs = ioremap(0x1ee1000, 0x1000);
		is_old_ls1021a = true;
	}
	if (!clockgen.regs) {
		pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(chipinfo); i++) {
		if (of_device_is_compatible(np, chipinfo[i].compat))
			break;
		if (is_old_ls1021a &&
		    !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen"))
			break;
	}

	if (i == ARRAY_SIZE(chipinfo)) {
		pr_err("%s: unknown clockgen node %s\n", __func__,
		       np->full_name);
		goto err;
	}
	clockgen.info = chipinfo[i];

	if (clockgen.info.guts_compat) {
		struct device_node *guts;

		guts = of_find_compatible_node(NULL, NULL,
					       clockgen.info.guts_compat);
		if (guts) {
			clockgen.guts = of_iomap(guts, 0);
			if (!clockgen.guts) {
				pr_err("%s: Couldn't map %s regs\n", __func__,
				       guts->full_name);
			}
		}

	}

	if (has_erratum_a4510())
		clockgen.info.flags |= CG_CMUX_GE_PLAT;

	clockgen.sysclk = create_sysclk("cg-sysclk");
	create_plls(&clockgen);
	create_muxes(&clockgen);

	if (clockgen.info.init_periph)
		clockgen.info.init_periph(&clockgen);

	ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen);
	if (ret) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, ret);
	}

	return;
err:
	iounmap(clockgen.regs);
	clockgen.regs = NULL;
}

CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);

/* Legacy nodes */
CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);