/*
 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Simple multiplexer clock implementation
 */

#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>

/*
 * DOC: basic adjustable multiplexer clock that cannot gate
 *
 * Traits of this clock:
 * prepare - clk_prepare only ensures that parents are prepared
 * enable - clk_enable only ensures that parents are enabled
 * rate - rate is only affected by parent switching. No clk_set_rate support
 * parent - parent is adjustable through clk_set_parent
 */

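/*
 * Example (illustrative sketch only, not used by this driver): a platform
 * clock driver would typically register a basic mux like this, using the
 * clk_register_mux() helper defined below.  The parent names, register
 * offset and field layout here are hypothetical.
 *
 *	static DEFINE_SPINLOCK(example_mux_lock);
 *	static const char * const example_parents[] = { "osc24m", "pll1", "pll2" };
 *	struct clk *clk;
 *
 *	clk = clk_register_mux(NULL, "example_mux", example_parents,
 *			       ARRAY_SIZE(example_parents), 0,
 *			       base + 0x10, 0, 2, 0, &example_mux_lock);
 *
 * This describes a 2-bit select field at bits [1:0] of the register at
 * base + 0x10; with no translation table and no index flags the register
 * value is simply the zero-based parent index.
 */
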
static u8 clk_mux_get_parent(struct clk_hw *hw)
{
	struct clk_mux *mux = to_clk_mux(hw);
	int num_parents = clk_hw_get_num_parents(hw);
	u32 val;

	/*
	 * FIXME need a mux-specific flag to determine if val is bitwise or numeric
	 * e.g. sys_clkin_ck's clksel field is 3 bits wide, but ranges from 0x1
	 * to 0x7 (index starts at one)
	 * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
	 * val = 0x4 really means "bit 2, index starts at bit 0"
	 */
	val = clk_readl(mux->reg) >> mux->shift;
	val &= mux->mask;

	if (mux->table) {
		int i;

		for (i = 0; i < num_parents; i++)
			if (mux->table[i] == val)
				return i;
		return -EINVAL;
	}

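	/*
	 * Without a translation table, the register value is decoded
	 * according to the mux flags: CLK_MUX_INDEX_BIT means each parent
	 * has its own bit (e.g. val 0x4 selects parent 2), while
	 * CLK_MUX_INDEX_ONE means the hardware counts parents from 1, so
	 * the value is decremented to get a zero-based index.
	 */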
	if (val && (mux->flags & CLK_MUX_INDEX_BIT))
		val = ffs(val) - 1;

	if (val && (mux->flags & CLK_MUX_INDEX_ONE))
		val--;

	if (val >= num_parents)
		return -EINVAL;

	return val;
}

static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val;
	unsigned long flags = 0;

	if (mux->table) {
		index = mux->table[index];
	} else {
		if (mux->flags & CLK_MUX_INDEX_BIT)
			index = 1 << index;

		if (mux->flags & CLK_MUX_INDEX_ONE)
			index++;
	}

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);
	else
		__acquire(mux->lock);

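	/*
	 * With CLK_MUX_HIWORD_MASK the upper 16 bits of the register act as
	 * a write enable for the corresponding lower bits, so the field can
	 * be updated by setting the mask bits in the high word instead of
	 * doing a read-modify-write of the register.
	 */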
	if (mux->flags & CLK_MUX_HIWORD_MASK) {
		val = mux->mask << (mux->shift + 16);
	} else {
		val = clk_readl(mux->reg);
		val &= ~(mux->mask << mux->shift);
	}
	val |= index << mux->shift;
	clk_writel(val, mux->reg);

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);
	else
		__release(mux->lock);

	return 0;
}

const struct clk_ops clk_mux_ops = {
	.get_parent = clk_mux_get_parent,
	.set_parent = clk_mux_set_parent,
	.determine_rate = __clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);

const struct clk_ops clk_mux_ro_ops = {
	.get_parent = clk_mux_get_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ro_ops);

struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags,
		void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, u32 *table, spinlock_t *lock)
{
	struct clk_mux *mux;
	struct clk_hw *hw;
	struct clk_init_data init;
	u8 width = 0;
	int ret;

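	/*
	 * For hiword-mask registers both the value field and its write
	 * enable must fit in the same 32-bit register, so the field is
	 * restricted to the lower 16 bits.
	 */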
	if (clk_mux_flags & CLK_MUX_HIWORD_MASK) {
		width = fls(mask) - ffs(mask) + 1;
		if (width + shift > 16) {
			pr_err("mux value exceeds LOWORD field\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* allocate the mux */
	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_mux_flags & CLK_MUX_READ_ONLY)
		init.ops = &clk_mux_ro_ops;
	else
		init.ops = &clk_mux_ops;
	init.flags = flags | CLK_IS_BASIC;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	/* struct clk_mux assignments */
	mux->reg = reg;
	mux->shift = shift;
	mux->mask = mask;
	mux->flags = clk_mux_flags;
	mux->lock = lock;
	mux->table = table;
	mux->hw.init = &init;

	hw = &mux->hw;
	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(mux);
		hw = ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(clk_hw_register_mux_table);

struct clk *clk_register_mux_table(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags,
		void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, u32 *table, spinlock_t *lock)
{
	struct clk_hw *hw;

	hw = clk_hw_register_mux_table(dev, name, parent_names, num_parents,
				       flags, reg, shift, mask, clk_mux_flags,
				       table, lock);
	if (IS_ERR(hw))
		return ERR_CAST(hw);
	return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_mux_table);

struct clk *clk_register_mux(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags,
		void __iomem *reg, u8 shift, u8 width,
		u8 clk_mux_flags, spinlock_t *lock)
{
	u32 mask = BIT(width) - 1;

	return clk_register_mux_table(dev, name, parent_names, num_parents,
				      flags, reg, shift, mask, clk_mux_flags,
				      NULL, lock);
}
EXPORT_SYMBOL_GPL(clk_register_mux);

struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags,
		void __iomem *reg, u8 shift, u8 width,
		u8 clk_mux_flags, spinlock_t *lock)
{
	u32 mask = BIT(width) - 1;

	return clk_hw_register_mux_table(dev, name, parent_names, num_parents,
					 flags, reg, shift, mask, clk_mux_flags,
					 NULL, lock);
}
EXPORT_SYMBOL_GPL(clk_hw_register_mux);

void clk_unregister_mux(struct clk *clk)
{
	struct clk_mux *mux;
	struct clk_hw *hw;

	hw = __clk_get_hw(clk);
	if (!hw)
		return;

	mux = to_clk_mux(hw);

	clk_unregister(clk);
	kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_unregister_mux);

void clk_hw_unregister_mux(struct clk_hw *hw)
{
	struct clk_mux *mux;

	mux = to_clk_mux(hw);

	clk_hw_unregister(hw);
	kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_mux);