// SPDX-License-Identifier: GPL-2.0
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "clk.h"

DEFINE_SPINLOCK(imx_ccm_lock);

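/*
 * Walk an array of just-registered clocks and log an error for every entry
 * that failed to register, so broken clocks are visible in the boot log.
 */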
void __init imx_check_clocks(struct clk *clks[], unsigned int count)
{
	unsigned i;

	for (i = 0; i < count; i++)
		if (IS_ERR(clks[i]))
			pr_err("i.MX clk %u: register failed with %ld\n",
			       i, PTR_ERR(clks[i]));
}

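/*
 * Look up a fixed clock by name in the device tree: resolve the node at
 * /clocks/<name> and ask its clock provider for the clock. Returns an
 * ERR_PTR on failure.
 */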
static struct clk * __init imx_obtain_fixed_clock_from_dt(const char *name)
{
	struct of_phandle_args phandle;
	struct clk *clk = ERR_PTR(-ENODEV);
	char *path;

	path = kasprintf(GFP_KERNEL, "/clocks/%s", name);
	if (!path)
		return ERR_PTR(-ENOMEM);

	phandle.np = of_find_node_by_path(path);
	kfree(path);

	if (phandle.np) {
		clk = of_clk_get_from_provider(&phandle);
		of_node_put(phandle.np);
	}
	return clk;
}

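/*
 * Prefer a fixed clock described in the device tree; if none is found there,
 * fall back to registering a fixed-rate clock with the given rate.
 */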
struct clk * __init imx_obtain_fixed_clock(
			const char *name, unsigned long rate)
{
	struct clk *clk;

	clk = imx_obtain_fixed_clock_from_dt(name);
	if (IS_ERR(clk))
		clk = imx_clk_fixed(name, rate);
	return clk;
}

/*
 * This fixes up the CCM_CSCMR1 register write value.
 * The write/read/divider values of the aclk_podf field
 * of that register have the relationship described by
 * the following table:
 *
 * write value       read value        divider
 * 3b'000            3b'110            7
 * 3b'001            3b'111            8
 * 3b'010            3b'100            5
 * 3b'011            3b'101            6
 * 3b'100            3b'010            3
 * 3b'101            3b'011            4
 * 3b'110            3b'000            1
 * 3b'111            3b'001            2 (default)
 *
 * That's why we do the xor operation below.
 */
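/*
 * Note that the divider is simply the read value plus one, and that the
 * write and read encodings differ by an XOR with 3b'110. CSCMR1_FIXUP is
 * that 3b'110 pattern shifted up to what the constant's value implies is
 * the aclk_podf bit position (bits 22:20), so a single XOR converts
 * between the two encodings in either direction.
 */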
#define CSCMR1_FIXUP	0x00600000

void imx_cscmr1_fixup(u32 *val)
{
	*val ^= CSCMR1_FIXUP;
	return;
}

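/*
 * When "earlycon" or "earlyprintk" is on the kernel command line, take an
 * extra prepare/enable reference on the UART clocks passed to
 * imx_register_uart_clocks(), so the early console keeps working while the
 * rest of the clock tree is set up. The references are dropped again from a
 * late initcall, once the regular console driver has had a chance to claim
 * its clocks.
 */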
static int imx_keep_uart_clocks __initdata;
static struct clk ** const *imx_uart_clocks __initdata;

static int __init imx_keep_uart_clocks_param(char *str)
{
	imx_keep_uart_clocks = 1;

	return 0;
}
__setup_param("earlycon", imx_keep_uart_earlycon,
	      imx_keep_uart_clocks_param, 0);
__setup_param("earlyprintk", imx_keep_uart_earlyprintk,
	      imx_keep_uart_clocks_param, 0);

void __init imx_register_uart_clocks(struct clk ** const clks[])
{
	if (imx_keep_uart_clocks) {
		int i;

		imx_uart_clocks = clks;
		for (i = 0; imx_uart_clocks[i]; i++)
			clk_prepare_enable(*imx_uart_clocks[i]);
	}
}

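/* Drop the boot-time references taken in imx_register_uart_clocks(). */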
static int __init imx_clk_disable_uart(void)
{
	if (imx_keep_uart_clocks && imx_uart_clocks) {
		int i;

		for (i = 0; imx_uart_clocks[i]; i++)
			clk_disable_unprepare(*imx_uart_clocks[i]);
	}

	return 0;
}
late_initcall_sync(imx_clk_disable_uart);