/*
 * V4L2 clock service
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <media/v4l2-clk.h>
#include <media/v4l2-subdev.h>

static DEFINE_MUTEX(clk_lock);
static LIST_HEAD(clk_list);

static struct v4l2_clk *v4l2_clk_find(const char *dev_id)
{
	struct v4l2_clk *clk;

	list_for_each_entry(clk, &clk_list, list)
		if (!strcmp(dev_id, clk->dev_id))
			return clk;

	return ERR_PTR(-ENODEV);
}

struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id)
{
	struct v4l2_clk *clk;

	mutex_lock(&clk_lock);
	clk = v4l2_clk_find(dev_name(dev));

	if (!IS_ERR(clk))
		atomic_inc(&clk->use_count);
	mutex_unlock(&clk_lock);

	return clk;
}
EXPORT_SYMBOL(v4l2_clk_get);

void v4l2_clk_put(struct v4l2_clk *clk)
{
	struct v4l2_clk *tmp;

	if (IS_ERR(clk))
		return;

	mutex_lock(&clk_lock);

	list_for_each_entry(tmp, &clk_list, list)
		if (tmp == clk)
			atomic_dec(&clk->use_count);

	mutex_unlock(&clk_lock);
}
EXPORT_SYMBOL(v4l2_clk_put);

static int v4l2_clk_lock_driver(struct v4l2_clk *clk)
{
	struct v4l2_clk *tmp;
	int ret = -ENODEV;

	mutex_lock(&clk_lock);

	list_for_each_entry(tmp, &clk_list, list)
		if (tmp == clk) {
			ret = !try_module_get(clk->ops->owner);
			if (ret)
				ret = -EFAULT;
			break;
		}

	mutex_unlock(&clk_lock);

	return ret;
}

static void v4l2_clk_unlock_driver(struct v4l2_clk *clk)
{
	module_put(clk->ops->owner);
}

int v4l2_clk_enable(struct v4l2_clk *clk)
{
	int ret = v4l2_clk_lock_driver(clk);

	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);

	if (++clk->enable == 1 && clk->ops->enable) {
		ret = clk->ops->enable(clk);
		if (ret < 0)
			clk->enable--;
	}

	mutex_unlock(&clk->lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_enable);

/*
 * You might Oops if you try to disable a disabled clock, because then the
 * driver isn't locked and could have been unloaded by now, so don't do that.
 */
void v4l2_clk_disable(struct v4l2_clk *clk)
{
	int enable;

	mutex_lock(&clk->lock);

	enable = --clk->enable;
	if (WARN(enable < 0, "Unbalanced %s() on %s!\n", __func__,
		 clk->dev_id))
		clk->enable++;
	else if (!enable && clk->ops->disable)
		clk->ops->disable(clk);

	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);
}
EXPORT_SYMBOL(v4l2_clk_disable);
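
/*
 * Consumer usage sketch (illustrative only, not part of this file's code):
 * a subdevice driver looks its clock up by its own device name, keeps the
 * reference for as long as it needs the clock, and balances every enable
 * with exactly one disable. The "sensor" device and its error paths below
 * are hypothetical placeholders.
 *
 *	struct v4l2_clk *clk = v4l2_clk_get(&sensor->dev, NULL);
 *	int ret;
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = v4l2_clk_enable(clk);
 *	if (ret < 0) {
 *		v4l2_clk_put(clk);
 *		return ret;
 *	}
 *
 *	... talk to the hardware ...
 *
 *	v4l2_clk_disable(clk);
 *	v4l2_clk_put(clk);
 */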

unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk)
{
	int ret = v4l2_clk_lock_driver(clk);

	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);
	if (!clk->ops->get_rate)
		ret = -ENOSYS;
	else
		ret = clk->ops->get_rate(clk);
	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_get_rate);

int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate)
{
	int ret = v4l2_clk_lock_driver(clk);

	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);
	if (!clk->ops->set_rate)
		ret = -ENOSYS;
	else
		ret = clk->ops->set_rate(clk, rate);
	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_set_rate);

struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
				   const char *dev_id,
				   void *priv)
{
	struct v4l2_clk *clk;
	int ret;

	if (!ops || !dev_id)
		return ERR_PTR(-EINVAL);

	clk = kzalloc(sizeof(struct v4l2_clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->dev_id = kstrdup(dev_id, GFP_KERNEL);
	if (!clk->dev_id) {
		ret = -ENOMEM;
		goto ealloc;
	}
	clk->ops = ops;
	clk->priv = priv;
	atomic_set(&clk->use_count, 0);
	mutex_init(&clk->lock);

	mutex_lock(&clk_lock);
	if (!IS_ERR(v4l2_clk_find(dev_id))) {
		mutex_unlock(&clk_lock);
		ret = -EEXIST;
		goto eexist;
	}
	list_add_tail(&clk->list, &clk_list);
	mutex_unlock(&clk_lock);

	return clk;

eexist:
ealloc:
	kfree(clk->dev_id);
	kfree(clk);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(v4l2_clk_register);
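
/*
 * Provider sketch (illustrative only): a bridge or board driver can expose a
 * clock it controls by filling in a struct v4l2_clk_ops and registering it
 * under the consumer's dev_name(). The mycam_* names and the "1-0010" device
 * id below are made-up placeholders, not symbols defined anywhere in V4L2;
 * mycam_hw_clock_on() stands in for whatever driver-specific code actually
 * gates the clock.
 *
 *	static int mycam_clk_enable(struct v4l2_clk *clk)
 *	{
 *		struct mycam *cam = clk->priv;
 *
 *		return mycam_hw_clock_on(cam);
 *	}
 *
 *	static const struct v4l2_clk_ops mycam_clk_ops = {
 *		.owner	= THIS_MODULE,
 *		.enable	= mycam_clk_enable,
 *	};
 *
 *	clk = v4l2_clk_register(&mycam_clk_ops, "1-0010", cam);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */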

void v4l2_clk_unregister(struct v4l2_clk *clk)
{
	if (WARN(atomic_read(&clk->use_count),
		 "%s(): Refusing to unregister ref-counted %s clock!\n",
		 __func__, clk->dev_id))
		return;

	mutex_lock(&clk_lock);
	list_del(&clk->list);
	mutex_unlock(&clk_lock);

	kfree(clk->dev_id);
	kfree(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister);

struct v4l2_clk_fixed {
	unsigned long rate;
	struct v4l2_clk_ops ops;
};

static unsigned long fixed_get_rate(struct v4l2_clk *clk)
{
	struct v4l2_clk_fixed *priv = clk->priv;
	return priv->rate;
}

struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
				unsigned long rate, struct module *owner)
{
	struct v4l2_clk *clk;
	struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->rate = rate;
	priv->ops.get_rate = fixed_get_rate;
	priv->ops.owner = owner;

	clk = v4l2_clk_register(&priv->ops, dev_id, priv);
	if (IS_ERR(clk))
		kfree(priv);

	return clk;
}
EXPORT_SYMBOL(__v4l2_clk_register_fixed);
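
/*
 * Fixed-clock sketch (illustrative only): a board whose sensor is fed by a
 * free-running oscillator only needs to advertise a rate, so it can register
 * a fixed clock instead of providing its own ops. The "1-0010" device id and
 * the 24 MHz rate below are placeholders.
 *
 *	clk = __v4l2_clk_register_fixed("1-0010", 24000000, THIS_MODULE);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	...
 *	v4l2_clk_unregister_fixed(clk);
 */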

void v4l2_clk_unregister_fixed(struct v4l2_clk *clk)
{
	kfree(clk->priv);
	v4l2_clk_unregister(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister_fixed);