blob: a75df6cb141fdf1466baa323507da29d28e78617 [file] [log] [blame]
Sakari Ailusc3b5b022010-03-01 05:14:18 -03001/*
2 * v4l2-event.c
3 *
4 * V4L2 events.
5 *
6 * Copyright (C) 2009--2010 Nokia Corporation.
7 *
Sakari Ailus8c5dff92012-10-28 06:44:17 -03008 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
Sakari Ailusc3b5b022010-03-01 05:14:18 -03009 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * version 2 as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
Sakari Ailusc3b5b022010-03-01 05:14:18 -030018 */
19
20#include <media/v4l2-dev.h>
21#include <media/v4l2-fh.h>
22#include <media/v4l2-event.h>
23
24#include <linux/sched.h>
25#include <linux/slab.h>
Paul Gortmaker35a24632011-08-01 15:26:38 -040026#include <linux/export.h>
Sakari Ailusc3b5b022010-03-01 05:14:18 -030027
/*
 * Translate a logical index @idx (relative to the oldest queued event,
 * sev->first) into a physical index in the subscription's circular
 * sev->events[] buffer of sev->elems entries.
 */
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}
Sakari Ailusc3b5b022010-03-01 05:14:18 -030033
/*
 * Remove the oldest pending event from @fh and copy it into @event.
 *
 * Returns 0 on success or -ENOENT when no event is queued.  Takes
 * fh->vdev->fh_lock internally, so the caller must not hold it.
 */
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	/* navailable must track the available list length. */
	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	/* Report how many more events are still pending after this one. */
	kev->event.pending = fh->navailable;
	*event = kev->event;
	/* Free up the slot in the subscription's circular buffer. */
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}
61
/*
 * Dequeue one event from @fh into @event.
 *
 * When @nonblocking, returns -ENOENT immediately if nothing is pending.
 * Otherwise sleeps until an event arrives or the wait is interrupted
 * (negative return from wait_event_interruptible).
 */
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		/*
		 * Another waiter may have raced us to the event, in which
		 * case we get -ENOENT and go back to sleep.
		 */
		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
Sakari Ailusc3b5b022010-03-01 05:14:18 -030089
Hans Verkuil6e239392011-06-07 11:13:44 -030090/* Caller must hold fh->vdev->fh_lock! */
Sakari Ailusc3b5b022010-03-01 05:14:18 -030091static struct v4l2_subscribed_event *v4l2_event_subscribed(
Hans Verkuil6e239392011-06-07 11:13:44 -030092 struct v4l2_fh *fh, u32 type, u32 id)
Sakari Ailusc3b5b022010-03-01 05:14:18 -030093{
Sakari Ailusc3b5b022010-03-01 05:14:18 -030094 struct v4l2_subscribed_event *sev;
95
Sakari Ailusf3cd3852010-05-03 12:42:46 -030096 assert_spin_locked(&fh->vdev->fh_lock);
Sakari Ailusc3b5b022010-03-01 05:14:18 -030097
Hans Verkuil3f66f0e2011-06-20 11:56:24 -030098 list_for_each_entry(sev, &fh->subscribed, list)
Hans Verkuil6e239392011-06-07 11:13:44 -030099 if (sev->type == type && sev->id == id)
Sakari Ailusc3b5b022010-03-01 05:14:18 -0300100 return sev;
Sakari Ailusc3b5b022010-03-01 05:14:18 -0300101
102 return NULL;
103}
104
/*
 * Queue event @ev with timestamp @ts on file handle @fh.
 *
 * Caller must hold fh->vdev->fh_lock.  Does nothing when @fh is not
 * subscribed to ev->type/ev->id, or when the subscription's add op has
 * not finished yet (sev->elems == 0).  Each subscription owns a fixed
 * circular buffer of sev->elems kevents; on overflow the oldest event
 * is dropped, optionally merged/replaced via sev->ops so its payload
 * information is not lost.
 */
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/*
	 * If the event has been added to the fh->subscribed list, but its
	 * add op has not completed yet elems will be 0, treat this as
	 * not being subscribed.
	 */
	if (!sev->elems)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			/* Single-slot queue: let the replace op combine old and new. */
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			/* Fold the dropped event into the now-oldest one. */
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}
163
Sakari Ailusc3b5b022010-03-01 05:14:18 -0300164void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
165{
166 struct v4l2_fh *fh;
167 unsigned long flags;
168 struct timespec timestamp;
169
Hans Verkuilfb8dfda2015-06-23 06:20:23 -0300170 if (vdev == NULL)
171 return;
172
Sakari Ailusc3b5b022010-03-01 05:14:18 -0300173 ktime_get_ts(&timestamp);
174
175 spin_lock_irqsave(&vdev->fh_lock, flags);
176
Hans Verkuil3f66f0e2011-06-20 11:56:24 -0300177 list_for_each_entry(fh, &vdev->fh_list, list)
Hans Verkuil6e239392011-06-07 11:13:44 -0300178 __v4l2_event_queue_fh(fh, ev, &timestamp);
Sakari Ailusc3b5b022010-03-01 05:14:18 -0300179
180 spin_unlock_irqrestore(&vdev->fh_lock, flags);
181}
182EXPORT_SYMBOL_GPL(v4l2_event_queue);
183
Hans Verkuil6e239392011-06-07 11:13:44 -0300184void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
185{
186 unsigned long flags;
187 struct timespec timestamp;
188
189 ktime_get_ts(&timestamp);
190
191 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
192 __v4l2_event_queue_fh(fh, ev, &timestamp);
193 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
194}
195EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);
196
/* Number of events pending on @fh; non-zero means a dequeue won't block. */
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
202
/*
 * Subscribe @fh to the event described by @sub.
 *
 * @elems is the depth of the per-subscription event queue (clamped to a
 * minimum of 1); @ops optionally supplies add/del/replace/merge hooks.
 * Subscribing again to an already-subscribed type/id is a successful
 * no-op.  Returns 0 on success, -EINVAL for V4L2_EVENT_ALL, -ENOMEM on
 * allocation failure, or the error returned by ops->add.
 */
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;

	/* V4L2_EVENT_ALL is only meaningful for unsubscribe. */
	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	/* The kevent ring buffer is allocated together with the subscription. */
	sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		kfree(sev);
		return 0; /* Already listening */
	}

	if (sev->ops && sev->ops->add) {
		int ret = sev->ops->add(sev, elems);
		if (ret) {
			/* Clear ops so the rollback unsubscribe won't call ops->del. */
			sev->ops = NULL;
			v4l2_event_unsubscribe(fh, sub);
			return ret;
		}
	}

	/* Mark as ready for use */
	sev->elems = elems;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
254
/*
 * Drop every subscription on @fh, one at a time.
 *
 * The spinlock is held only while peeking at the list head; the actual
 * unsubscribe runs without it, so the loop re-checks the list each pass.
 */
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);
Sakari Ailusc3b5b022010-03-01 05:14:18 -0300277
/*
 * Remove the subscription matching @sub from @fh, discarding any of its
 * events that are still queued.  V4L2_EVENT_ALL unsubscribes everything.
 * Always returns 0.
 */
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;
	int i;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL) {
		/* Remove any pending events for this subscription */
		for (i = 0; i < sev->in_use; i++) {
			list_del(&sev->events[sev_pos(sev, i)].list);
			fh->navailable--;
		}
		list_del(&sev->list);
	}

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	/* ops->del runs after the subscription is off the list. */
	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
Sylwester Nawrocki4f4d14b2013-01-22 18:58:57 -0300312
/*
 * Wrapper around v4l2_event_unsubscribe(); @sd is unused but the
 * signature matches the subdev unsubscribe_event hook shape.
 */
int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);
Arun Kumar K3cbe6e52014-05-14 03:59:42 -0300319
320static void v4l2_event_src_replace(struct v4l2_event *old,
321 const struct v4l2_event *new)
322{
323 u32 old_changes = old->u.src_change.changes;
324
325 old->u.src_change = new->u.src_change;
326 old->u.src_change.changes |= old_changes;
327}
328
/*
 * Merge callback: fold the dropped (older) event's 'changes' bits into
 * the event that remains queued.
 */
static void v4l2_event_src_merge(const struct v4l2_event *old,
				struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}
334
/* Ops that accumulate src_change.changes when the event queue overflows. */
static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};
339
340int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
341 const struct v4l2_event_subscription *sub)
342{
343 if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
344 return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
345 return -EINVAL;
346}
347EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);
348
/*
 * Wrapper around v4l2_src_change_event_subscribe(); @sd is unused but
 * the signature matches the subdev subscribe_event hook shape.
 */
int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
		struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);