/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ctrls.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}
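
/*
 * Worked example of the circular indexing above (illustration only):
 * with sev->elems == 3 and sev->first == 2, logical indices 0, 1 and 2
 * map to array slots 2, 0 and 1 respectively, so the oldest queued
 * event is always at sev_pos(sev, 0) and the next free slot at
 * sev_pos(sev, sev->in_use).
 */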

static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
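
/*
 * A minimal sketch of a typical caller (hypothetical, for illustration
 * only; the name sketch_dqevent is invented): a VIDIOC_DQEVENT handler
 * would normally pass the file's O_NONBLOCK flag straight through, so
 * blocking behaviour follows the file descriptor.
 */
#if 0
static int sketch_dqevent(struct file *file, struct v4l2_fh *fh,
			  struct v4l2_event *ev)
{
	/* Block only if the fd was opened without O_NONBLOCK. */
	return v4l2_event_dequeue(fh, ev, file->f_flags & O_NONBLOCK);
}
#endif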

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}

static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->replace) {
				sev->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}
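
/*
 * Overflow behaviour of the function above, by way of example: with
 * elems == 1 and a replace() callback (the control-event case), a new
 * event overwrites the single pending slot via replace() and the
 * payload copy is skipped; with elems > 1 and a merge() callback, the
 * dropped oldest event is folded into the second-oldest one, so the
 * change information survives even though an event slot was reclaimed.
 */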

void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, &timestamp);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);
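
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file; sketch_signal_eos is an invented name): broadcasting an
 * end-of-stream event to every file handle subscribed to it.
 */
#if 0
static void sketch_signal_eos(struct video_device *vdev)
{
	const struct v4l2_event ev = {
		.type = V4L2_EVENT_EOS,
	};

	/* Delivered only to file handles subscribed to V4L2_EVENT_EOS. */
	v4l2_event_queue(vdev, &ev);
}
#endif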

void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, &timestamp);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

static void ctrls_replace(struct v4l2_event *old, const struct v4l2_event *new)
{
	u32 old_changes = old->u.ctrl.changes;

	old->u.ctrl = new->u.ctrl;
	old->u.ctrl.changes |= old_changes;
}

static void ctrls_merge(const struct v4l2_event *old, struct v4l2_event *new)
{
	new->u.ctrl.changes |= old->u.ctrl.changes;
}
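
/*
 * Example of the merge semantics above: if a pending control event
 * carrying V4L2_EVENT_CTRL_CH_VALUE must be dropped and the next one
 * carries V4L2_EVENT_CTRL_CH_FLAGS, the surviving event ends up with
 * both bits set in u.ctrl.changes, so the listener still learns about
 * every kind of change even though one event payload was lost.
 */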

int v4l2_event_subscribe(struct v4l2_fh *fh,
			 struct v4l2_event_subscription *sub, unsigned elems)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	struct v4l2_ctrl *ctrl = NULL;
	unsigned long flags;
	unsigned i;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;
	if (sub->type == V4L2_EVENT_CTRL) {
		ctrl = v4l2_ctrl_find(fh->ctrl_handler, sub->id);
		if (ctrl == NULL)
			return -EINVAL;
	}

	sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->elems = elems;
	if (ctrl) {
		sev->replace = ctrls_replace;
		sev->merge = ctrls_merge;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	/* v4l2_ctrl_add_event uses a mutex, so do this outside the spin lock */
	if (found_ev)
		kfree(sev);
	else if (ctrl)
		v4l2_ctrl_add_event(ctrl, sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
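
/*
 * A minimal sketch of a driver hook-up (hypothetical, assuming a
 * driver that only supports control events; sketch_subscribe_event is
 * an invented name): the VIDIOC_SUBSCRIBE_EVENT ioctl op forwards to
 * this helper with a queue depth of the driver's choosing.
 */
#if 0
static int sketch_subscribe_event(struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	if (sub->type != V4L2_EVENT_CTRL)
		return -EINVAL;
	/* Keep up to four control events queued per subscription. */
	return v4l2_event_subscribe(fh, sub, 4);
}
#endif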

void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;
	int i;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL) {
		/* Remove any pending events for this subscription */
		for (i = 0; i < sev->in_use; i++) {
			list_del(&sev->events[sev_pos(sev, i)].list);
			fh->navailable--;
		}
		list_del(&sev->list);
	}

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
	if (sev && sev->type == V4L2_EVENT_CTRL) {
		struct v4l2_ctrl *ctrl = v4l2_ctrl_find(fh->ctrl_handler, sev->id);

		if (ctrl)
			v4l2_ctrl_del_event(ctrl, sev);
	}

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
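
/*
 * The matching unsubscribe hook is usually a one-liner (hypothetical
 * sketch; sketch_unsubscribe_event is an invented name). Passing
 * V4L2_EVENT_ALL from userspace drops every subscription at once via
 * v4l2_event_unsubscribe_all() above.
 */
#if 0
static int sketch_unsubscribe_event(struct v4l2_fh *fh,
				    struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
#endif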