/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * based on
 *
 * samsung/clk.c
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
#include "clk.h"

/**
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
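/*
 * For illustration only (not taken from a real SoC driver): a branch of
 * the shape above is normally declared with the COMPOSITE() helper from
 * clk.h and reaches this function via rockchip_clk_register_branches().
 * The RK_CLKSEL_CON()/RK_CLKGATE_CON() offsets and the MFLAGS/DFLAGS/
 * GFLAGS shorthands below stand in for the SoC-specific definitions of
 * the individual clk-rk*.c drivers:
 *
 *	COMPOSITE(0, "aclk_example", mux_example_parents, 0,
 *			RK_CLKSEL_CON(10), 6, 2, MFLAGS, 0, 5, DFLAGS,
 *			RK_CLKGATE_CON(3), 9, GFLAGS),
 *
 * i.e. a 2-bit mux in bits [7:6] and a 5-bit divider in bits [4:0] of the
 * same CLKSEL register, gated by bit 9 of one of the CLKGATE registers.
 */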
static struct clk *rockchip_clk_register_branch(const char *name,
		const char *const *parent_names, u8 num_parents, void __iomem *base,
		int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
		u8 div_shift, u8 div_width, u8 div_flags,
		struct clk_div_table *div_table, int gate_offset,
		u8 gate_shift, u8 gate_flags, unsigned long flags,
		spinlock_t *lock)
{
	struct clk *clk;
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							  : &clk_mux_ops;
	}

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			goto err_gate;

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div)
			goto err_div;

		div->flags = div_flags;
		div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div->table = div_table;
		div_ops = &clk_divider_ops;
	}

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     mux ? &mux->hw : NULL, mux_ops,
				     div ? &div->hw : NULL, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);

	return clk;

	/* don't leak the components allocated before the failing one */
err_div:
	kfree(gate);
err_gate:
	kfree(mux);
	return ERR_PTR(-ENOMEM);
}

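/*
 * Rockchip fractional dividers use a single 32-bit register holding the
 * numerator in bits [31:16] and the denominator in bits [15:0], which is
 * what the mshift/mmask and nshift/nmask settings below describe.
 */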
static struct clk *rockchip_clk_register_frac_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, spinlock_t *lock)
{
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	/* validate before allocating anything */
	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			return ERR_PTR(-ENOMEM);

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div) {
		kfree(gate);
		return ERR_PTR(-ENOMEM);
	}

	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mmask = 0xffff0000;
	div->nshift = 0;
	div->nmask = 0xffff;
	div->lock = lock;
	div_ops = &clk_fractional_divider_ops;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &div->hw, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);

	return clk;
}

static DEFINE_SPINLOCK(clk_lock);
static struct clk **clk_table;
static void __iomem *reg_base;
static struct clk_onecell_data clk_data;
static struct device_node *cru_node;
static struct regmap *grf;

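/*
 * Common initialization: remember the CRU iomem base and device node,
 * allocate the clock lookup table and expose it via a onecell provider so
 * consumers can reference the clocks by index from the devicetree.
 */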
void __init rockchip_clk_init(struct device_node *np, void __iomem *base,
			      unsigned long nr_clks)
{
	reg_base = base;
	cru_node = np;
	grf = ERR_PTR(-EPROBE_DEFER);

	clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
	if (!clk_table)
		pr_err("%s: could not allocate clock lookup table\n", __func__);

	clk_data.clks = clk_table;
	clk_data.clk_num = nr_clks;
	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}

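/*
 * The GRF regmap is looked up lazily: rockchip_clk_init() primes the
 * cached pointer with -EPROBE_DEFER and the first successful
 * syscon_regmap_lookup_by_phandle() call replaces it, so callers must be
 * prepared to receive an ERR_PTR() while the syscon is not yet available.
 */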
struct regmap *rockchip_clk_get_grf(void)
{
	if (IS_ERR(grf))
		grf = syscon_regmap_lookup_by_phandle(cru_node, "rockchip,grf");
	return grf;
}

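/*
 * Branch-table entries that are not exported through the DT binding
 * typically carry id 0, so only clocks with a real id are entered into
 * the lookup table here.
 */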
void rockchip_clk_add_lookup(struct clk *clk, unsigned int id)
{
	if (clk_table && id)
		clk_table[id] = clk;
}

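/*
 * Register the PLLs from an SoC's pll table. The PLL lock-status bits sit
 * in the GRF rather than the CRU, which is why a single grf_lock_offset is
 * passed here and combined with each PLL's lock_shift inside
 * rockchip_clk_register_pll().
 */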
void __init rockchip_clk_register_plls(struct rockchip_pll_clock *list,
				unsigned int nr_pll, int grf_lock_offset)
{
	struct clk *clk;
	int idx;

	for (idx = 0; idx < nr_pll; idx++, list++) {
		clk = rockchip_clk_register_pll(list->type, list->name,
				list->parent_names, list->num_parents,
				reg_base, list->con_offset, grf_lock_offset,
				list->lock_shift, list->mode_offset,
				list->mode_shift, list->rate_table,
				list->pll_flags, &clk_lock);
		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}

		rockchip_clk_add_lookup(clk, list->id);
	}
}

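/*
 * Walk an SoC's branch table and register every entry according to its
 * branch_type: simple muxes, dividers and gates map directly onto the
 * common clk building blocks, while composite, fractional-divider, mmc
 * and inverter branches use the Rockchip-specific helpers.
 */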
void __init rockchip_clk_register_branches(
				      struct rockchip_clk_branch *list,
				      unsigned int nr_clk)
{
	struct clk *clk;
	unsigned int idx;
	unsigned long flags;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		/* start from a clean slate so unknown types are caught below */
		clk = NULL;
		flags = list->flags;

		/* catch simple muxes */
		switch (list->branch_type) {
		case branch_mux:
			clk = clk_register_mux(NULL, list->name,
				list->parent_names, list->num_parents,
				flags, reg_base + list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags, &clk_lock);
			break;
		case branch_divider:
			if (list->div_table)
				clk = clk_register_divider_table(NULL,
					list->name, list->parent_names[0],
					flags, reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, list->div_table,
					&clk_lock);
			else
				clk = clk_register_divider(NULL, list->name,
					list->parent_names[0], flags,
					reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, &clk_lock);
			break;
		case branch_fraction_divider:
			clk = rockchip_clk_register_frac_branch(list->name,
				list->parent_names, list->num_parents,
				reg_base, list->muxdiv_offset, list->div_flags,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &clk_lock);
			break;
		case branch_gate:
			flags |= CLK_SET_RATE_PARENT;

			clk = clk_register_gate(NULL, list->name,
				list->parent_names[0], flags,
				reg_base + list->gate_offset,
				list->gate_shift, list->gate_flags, &clk_lock);
			break;
		case branch_composite:
			clk = rockchip_clk_register_branch(list->name,
				list->parent_names, list->num_parents,
				reg_base, list->muxdiv_offset, list->mux_shift,
				list->mux_width, list->mux_flags,
				list->div_shift, list->div_width,
				list->div_flags, list->div_table,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &clk_lock);
			break;
		case branch_mmc:
			clk = rockchip_clk_register_mmc(
				list->name,
				list->parent_names, list->num_parents,
				reg_base + list->muxdiv_offset,
				list->div_shift
			);
			break;
		case branch_inverter:
			clk = rockchip_clk_register_inverter(
				list->name, list->parent_names,
				list->num_parents,
				reg_base + list->muxdiv_offset,
				list->div_shift, list->div_flags, &clk_lock);
			break;
		}

		/* none of the cases above matched */
		if (!clk) {
			pr_err("%s: unknown clock type %d\n",
			       __func__, list->branch_type);
			continue;
		}

		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s: %ld\n",
			       __func__, list->name, PTR_ERR(clk));
			continue;
		}

		rockchip_clk_add_lookup(clk, list->id);
	}
}

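/*
 * The armclk needs coordinated reparenting and divider updates while its
 * PLL parent relocks, so it is not a plain branch but goes through the
 * cpuclk implementation in clk-cpu.c, driven by the per-SoC reg_data and
 * rate table passed in here.
 */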
void __init rockchip_clk_register_armclk(unsigned int lookup_id,
			const char *name, const char *const *parent_names,
			u8 num_parents,
			const struct rockchip_cpuclk_reg_data *reg_data,
			const struct rockchip_cpuclk_rate_table *rates,
			int nrates)
{
	struct clk *clk;

	clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
					   reg_data, rates, nrates, reg_base,
					   &clk_lock);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock %s: %ld\n",
		       __func__, name, PTR_ERR(clk));
		return;
	}

	rockchip_clk_add_lookup(clk, lookup_id);
}

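/*
 * Some clocks feed parts of the system that no Linux driver ever claims,
 * e.g. bus and interconnect clocks. Taking a prepare/enable reference on
 * them here keeps the clk framework from gating them as "unused" during
 * late init.
 */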
void __init rockchip_clk_protect_critical(const char *const clocks[],
					  int nclocks)
{
	int i;

	/* Protect the clocks that need to stay on */
	for (i = 0; i < nclocks; i++) {
		struct clk *clk = __clk_lookup(clocks[i]);

		if (clk)
			clk_prepare_enable(clk);
	}
}

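/*
 * Reboot support: the CRU provides a global software-reset register, and
 * writing the magic value 0xfdb9 to it restarts the SoC. Priority 128 is
 * the conventional "default" restart handler priority, so a board can
 * still override this with a more specific, higher-priority handler.
 */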
static unsigned int reg_restart;
static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	writel(0xfdb9, reg_base + reg_restart);
	return NOTIFY_DONE;
}

static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,
};

void __init rockchip_register_restart_notifier(unsigned int reg)
{
	int ret;

	reg_restart = reg;
	ret = register_restart_handler(&rockchip_restart_handler);
	if (ret)
		pr_err("%s: cannot register restart handler, %d\n",
		       __func__, ret);
}