/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}
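
/*
 * Illustrative note (not part of the original file): sev->events is used
 * as a ring buffer and sev->first is the index of the oldest stored
 * event, so sev_pos() turns a logical offset into a physical slot.
 * For example, with elems == 4 and first == 3:
 *
 *	sev_pos(sev, 0) == 3	(oldest event)
 *	sev_pos(sev, 1) == 0
 *	sev_pos(sev, 2) == 1
 *	sev_pos(sev, 3) == 2	(newest slot)
 */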
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
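
/*
 * Usage sketch (illustrative, assuming the standard v4l2-ioctl plumbing):
 * the V4L2 core's VIDIOC_DQEVENT handler calls this with nonblocking
 * derived from the file's O_NONBLOCK flag, roughly:
 *
 *	err = v4l2_event_dequeue(fh, &ev, file->f_flags & O_NONBLOCK);
 *
 * A blocking caller sleeps on fh->wait until __v4l2_event_queue_fh()
 * queues an event and wakes it up.
 */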
/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/*
	 * If the event has been added to the fh->subscribed list, but its
	 * add op has not completed yet, elems will be 0; treat this as
	 * not being subscribed.
	 */
	if (!sev->elems)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}
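
/*
 * Note on the overflow path above (illustrative): when the ring is full,
 * the oldest event is dropped to make room. For a single-element ring a
 * subscription's replace op can fold the old payload into the new event;
 * for larger rings a merge op can fold the dropped event into the next
 * oldest one, so no change information is lost even though a slot is.
 * See v4l2_event_src_replace()/v4l2_event_src_merge() below for a
 * concrete pair of ops.
 */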
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	if (vdev == NULL)
		return;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, &timestamp);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);
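
/*
 * Usage sketch (illustrative only; "mydev" and its layout are
 * hypothetical): a driver can broadcast an end-of-stream event to every
 * open file handle, e.g. from its interrupt handler:
 *
 *	static const struct v4l2_event ev = {
 *		.type = V4L2_EVENT_EOS,
 *	};
 *
 *	v4l2_event_queue(&mydev->vdev, &ev);
 *
 * The spin_lock_irqsave() above makes this safe in atomic context.
 */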
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, &timestamp);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
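
/*
 * Usage sketch (illustrative): a driver's poll op typically reports
 * POLLPRI when events are pending, roughly:
 *
 *	if (v4l2_event_pending(fh))
 *		res |= POLLPRI;
 *
 * where fh is the struct v4l2_fh of the polling file handle.
 */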
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		kfree(sev);
		return 0; /* Already listening */
	}

	if (sev->ops && sev->ops->add) {
		int ret = sev->ops->add(sev, elems);
		if (ret) {
			sev->ops = NULL;
			v4l2_event_unsubscribe(fh, sub);
			return ret;
		}
	}

	/* Mark as ready for use */
	sev->elems = elems;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
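
/*
 * Usage sketch (illustrative; the "mydrv" names are hypothetical): a
 * driver wires this up through its v4l2_ioctl_ops .vidioc_subscribe_event
 * op, dispatching on the event type:
 *
 *	static int mydrv_subscribe_event(struct v4l2_fh *fh,
 *				const struct v4l2_event_subscription *sub)
 *	{
 *		switch (sub->type) {
 *		case V4L2_EVENT_EOS:
 *			return v4l2_event_subscribe(fh, sub, 0, NULL);
 *		case V4L2_EVENT_CTRL:
 *			return v4l2_ctrl_subscribe_event(fh, sub);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */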
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;
	int i;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL) {
		/* Remove any pending events for this subscription */
		for (i = 0; i < sev->in_use; i++) {
			list_del(&sev->events[sev_pos(sev, i)].list);
			fh->navailable--;
		}
		list_del(&sev->list);
	}

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);
static void v4l2_event_src_replace(struct v4l2_event *old,
				const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}
static void v4l2_event_src_merge(const struct v4l2_event *old,
				struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}
static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};
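
/*
 * Worked example (illustrative): if a V4L2_EVENT_SOURCE_CHANGE carrying
 * V4L2_EVENT_SRC_CH_RESOLUTION is about to be dropped from a full ring,
 * the merge op above ORs its changes mask into the next oldest event, so
 * the reader still observes every change bit even after the queue
 * overflowed; only the per-event timestamps of the merged events are lost.
 */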
int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);
int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
		struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);