/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * based on
 *
 * samsung/clk.c
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
#include "clk.h"

/**
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
static struct clk *rockchip_clk_register_branch(const char *name,
		const char *const *parent_names, u8 num_parents, void __iomem *base,
		int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
		u8 div_shift, u8 div_width, u8 div_flags,
		struct clk_div_table *div_table, int gate_offset,
		u8 gate_shift, u8 gate_flags, unsigned long flags,
		spinlock_t *lock)
{
	struct clk *clk;
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							  : &clk_mux_ops;
	}

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			goto err_gate;

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div)
			goto err_div;

		div->flags = div_flags;
		div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div->table = div_table;
		div_ops = &clk_divider_ops;
	}

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     mux ? &mux->hw : NULL, mux_ops,
				     div ? &div->hw : NULL, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);

	return clk;

err_div:
	kfree(gate);
err_gate:
	kfree(mux);
	return ERR_PTR(-ENOMEM);
}
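
/*
 * Example call (illustrative sketch only; the offsets, shifts and widths
 * below are hypothetical and not taken from a real SoC): a branch with a
 * 1-bit mux at bit 15 and a 5-bit divider at bit 0 of the same select
 * register, plus a gate bit in a separate gate register, could be
 * registered through the helper above as
 *
 *	clk = rockchip_clk_register_branch("aclk_example", parents, 2,
 *			reg_base, 0x44, 15, 1, 0,
 *			0, 5, 0, NULL,
 *			0xd0, 3, 0, 0, &clk_lock);
 *
 * In practice SoC drivers do not call this directly but describe branches
 * in rockchip_clk_branch tables handled by rockchip_clk_register_branches().
 */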

/*
 * The fractional divider branch computes rate = parent_rate * m / n, with
 * the 16-bit numerator m in bits [31:16] and the 16-bit denominator n in
 * bits [15:0] of the register at muxdiv_offset.
 */
static struct clk *rockchip_clk_register_frac_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, spinlock_t *lock)
{
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			return ERR_PTR(-ENOMEM);

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div) {
		kfree(gate);
		return ERR_PTR(-ENOMEM);
	}

	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mwidth = 16;
	div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
	div->nshift = 0;
	div->nwidth = 16;
	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
	div->lock = lock;
	div_ops = &clk_fractional_divider_ops;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &div->hw, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);

	return clk;
}

static DEFINE_SPINLOCK(clk_lock);
static struct clk **clk_table;
static void __iomem *reg_base;
static struct clk_onecell_data clk_data;
static struct device_node *cru_node;
static struct regmap *grf;

void __init rockchip_clk_init(struct device_node *np, void __iomem *base,
			      unsigned long nr_clks)
{
	reg_base = base;
	cru_node = np;
	grf = ERR_PTR(-EPROBE_DEFER);

	clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
	if (!clk_table)
		pr_err("%s: could not allocate clock lookup table\n", __func__);

	clk_data.clks = clk_table;
	clk_data.clk_num = nr_clks;
	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}
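
/*
 * Typical use (sketch; the names and the CLK_NR_CLKS count are hypothetical
 * placeholders, not part of this file): a per-SoC driver maps the CRU and
 * calls the setup helpers from its CLK_OF_DECLARE init callback:
 *
 *	static void __init rk_example_clk_init(struct device_node *np)
 *	{
 *		void __iomem *base = of_iomap(np, 0);
 *
 *		rockchip_clk_init(np, base, CLK_NR_CLKS);
 *		rockchip_clk_register_plls(...);
 *		rockchip_clk_register_branches(...);
 *	}
 *	CLK_OF_DECLARE(rk_example_cru, "rockchip,rk-example-cru",
 *		       rk_example_clk_init);
 */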

struct regmap *rockchip_clk_get_grf(void)
{
	if (IS_ERR(grf))
		grf = syscon_regmap_lookup_by_phandle(cru_node, "rockchip,grf");
	return grf;
}

void rockchip_clk_add_lookup(struct clk *clk, unsigned int id)
{
	if (clk_table && id)
		clk_table[id] = clk;
}

void __init rockchip_clk_register_plls(struct rockchip_pll_clock *list,
				       unsigned int nr_pll, int grf_lock_offset)
{
	struct clk *clk;
	int idx;

	for (idx = 0; idx < nr_pll; idx++, list++) {
		clk = rockchip_clk_register_pll(list->type, list->name,
				list->parent_names, list->num_parents,
				reg_base, list->con_offset, grf_lock_offset,
				list->lock_shift, list->mode_offset,
				list->mode_shift, list->rate_table,
				list->pll_flags, &clk_lock);
		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}

		rockchip_clk_add_lookup(clk, list->id);
	}
}
213
Heiko Stübnera245fec2014-07-03 01:58:39 +0200214void __init rockchip_clk_register_branches(
215 struct rockchip_clk_branch *list,
216 unsigned int nr_clk)
217{
218 struct clk *clk = NULL;
219 unsigned int idx;
220 unsigned long flags;
221
222 for (idx = 0; idx < nr_clk; idx++, list++) {
223 flags = list->flags;
224
225 /* catch simple muxes */
226 switch (list->branch_type) {
227 case branch_mux:
228 clk = clk_register_mux(NULL, list->name,
229 list->parent_names, list->num_parents,
230 flags, reg_base + list->muxdiv_offset,
231 list->mux_shift, list->mux_width,
232 list->mux_flags, &clk_lock);
233 break;
234 case branch_divider:
235 if (list->div_table)
236 clk = clk_register_divider_table(NULL,
237 list->name, list->parent_names[0],
238 flags, reg_base + list->muxdiv_offset,
239 list->div_shift, list->div_width,
240 list->div_flags, list->div_table,
241 &clk_lock);
242 else
243 clk = clk_register_divider(NULL, list->name,
244 list->parent_names[0], flags,
245 reg_base + list->muxdiv_offset,
246 list->div_shift, list->div_width,
247 list->div_flags, &clk_lock);
248 break;
249 case branch_fraction_divider:
Heiko Stübnerb2155a72014-08-27 00:54:21 +0200250 clk = rockchip_clk_register_frac_branch(list->name,
251 list->parent_names, list->num_parents,
252 reg_base, list->muxdiv_offset, list->div_flags,
253 list->gate_offset, list->gate_shift,
254 list->gate_flags, flags, &clk_lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200255 break;
256 case branch_gate:
257 flags |= CLK_SET_RATE_PARENT;
258
Heiko Stübnera245fec2014-07-03 01:58:39 +0200259 clk = clk_register_gate(NULL, list->name,
260 list->parent_names[0], flags,
261 reg_base + list->gate_offset,
262 list->gate_shift, list->gate_flags, &clk_lock);
263 break;
264 case branch_composite:
Heiko Stübnera245fec2014-07-03 01:58:39 +0200265 clk = rockchip_clk_register_branch(list->name,
266 list->parent_names, list->num_parents,
267 reg_base, list->muxdiv_offset, list->mux_shift,
268 list->mux_width, list->mux_flags,
269 list->div_shift, list->div_width,
270 list->div_flags, list->div_table,
271 list->gate_offset, list->gate_shift,
272 list->gate_flags, flags, &clk_lock);
273 break;
Alexandru M Stan89bf26c2014-11-26 17:30:27 -0800274 case branch_mmc:
275 clk = rockchip_clk_register_mmc(
276 list->name,
277 list->parent_names, list->num_parents,
278 reg_base + list->muxdiv_offset,
279 list->div_shift
280 );
281 break;
Heiko Stuebner8a76f442015-07-05 11:00:14 +0200282 case branch_inverter:
283 clk = rockchip_clk_register_inverter(
284 list->name, list->parent_names,
285 list->num_parents,
286 reg_base + list->muxdiv_offset,
287 list->div_shift, list->div_flags, &clk_lock);
288 break;
Heiko Stübnera245fec2014-07-03 01:58:39 +0200289 }
290
291 /* none of the cases above matched */
292 if (!clk) {
293 pr_err("%s: unknown clock type %d\n",
294 __func__, list->branch_type);
295 continue;
296 }
297
298 if (IS_ERR(clk)) {
299 pr_err("%s: failed to register clock %s: %ld\n",
300 __func__, list->name, PTR_ERR(clk));
301 continue;
302 }
303
304 rockchip_clk_add_lookup(clk, list->id);
305 }
306}
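
/*
 * Example table entry (sketch; the clock names, register offset and the
 * GFLAGS definition are placeholders in the style of the per-SoC drivers,
 * not part of this file): SoC drivers describe their clock tree with the
 * branch macros from clk.h and hand the table to the function above, e.g.
 *
 *	static struct rockchip_clk_branch example_branches[] __initdata = {
 *		GATE(0, "hclk_example", "hclk_peri", 0,
 *				RK2928_CLKGATE_CON(4), 2, GFLAGS),
 *	};
 *
 *	rockchip_clk_register_branches(example_branches,
 *				       ARRAY_SIZE(example_branches));
 */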

void __init rockchip_clk_register_armclk(unsigned int lookup_id,
			const char *name, const char *const *parent_names,
			u8 num_parents,
			const struct rockchip_cpuclk_reg_data *reg_data,
			const struct rockchip_cpuclk_rate_table *rates,
			int nrates)
{
	struct clk *clk;

	clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
					   reg_data, rates, nrates, reg_base,
					   &clk_lock);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock %s: %ld\n",
		       __func__, name, PTR_ERR(clk));
		return;
	}

	rockchip_clk_add_lookup(clk, lookup_id);
}
328
Uwe Kleine-König692d8322015-02-18 10:59:45 +0100329void __init rockchip_clk_protect_critical(const char *const clocks[],
330 int nclocks)
Heiko Stübnerfe94f972014-08-14 23:00:26 +0200331{
332 int i;
333
334 /* Protect the clocks that needs to stay on */
335 for (i = 0; i < nclocks; i++) {
336 struct clk *clk = __clk_lookup(clocks[i]);
337
338 if (clk)
339 clk_prepare_enable(clk);
340 }
341}
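
/*
 * Example (sketch; the clock names are placeholders in the style of the
 * per-SoC drivers): a SoC driver lists bus clocks that must never be gated
 * and enables them right after registering its clock tree:
 *
 *	static const char *const example_critical_clocks[] __initconst = {
 *		"aclk_cpu",
 *		"aclk_peri",
 *	};
 *
 *	rockchip_clk_protect_critical(example_critical_clocks,
 *				      ARRAY_SIZE(example_critical_clocks));
 */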

static unsigned int reg_restart;
static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	/* writing the magic value triggers the CRU global software reset */
	writel(0xfdb9, reg_base + reg_restart);
	return NOTIFY_DONE;
}

static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,
};

void __init rockchip_register_restart_notifier(unsigned int reg)
{
	int ret;

	reg_restart = reg;
	ret = register_restart_handler(&rockchip_restart_handler);
	if (ret)
		pr_err("%s: cannot register restart handler, %d\n",
		       __func__, ret);
365}