/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/sched.h>
#include <linux/slab.h>

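/*
 * Allocate and initialize the per-filehandle event state: the wait
 * queue, the free/available/subscribed lists and the sequence counter.
 * The counter starts at -1 so the first event queued on this
 * filehandle is numbered 0.
 */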
int v4l2_event_init(struct v4l2_fh *fh)
{
	fh->events = kzalloc(sizeof(*fh->events), GFP_KERNEL);
	if (fh->events == NULL)
		return -ENOMEM;

	init_waitqueue_head(&fh->events->wait);

	INIT_LIST_HEAD(&fh->events->free);
	INIT_LIST_HEAD(&fh->events->available);
	INIT_LIST_HEAD(&fh->events->subscribed);

	fh->events->sequence = -1;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_init);

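/*
 * Grow the pool of preallocated event buffers on this filehandle to at
 * least n entries. Allocation uses GFP_KERNEL, so this must be called
 * from process context; only the list insertion itself happens under
 * the fh_lock.
 */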
int v4l2_event_alloc(struct v4l2_fh *fh, unsigned int n)
{
	struct v4l2_events *events = fh->events;
	unsigned long flags;

	if (!events) {
		WARN_ON(1);
		return -ENOMEM;
	}

	while (events->nallocated < n) {
		struct v4l2_kevent *kev;

		kev = kzalloc(sizeof(*kev), GFP_KERNEL);
		if (kev == NULL)
			return -ENOMEM;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		list_add_tail(&kev->list, &events->free);
		events->nallocated++;
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_alloc);

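/* Pop and kfree() every entry of the given type on the list. */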
#define list_kfree(list, type, member)				\
	while (!list_empty(list)) {				\
		type *hi;					\
		hi = list_first_entry(list, type, member);	\
		list_del(&hi->member);				\
		kfree(hi);					\
	}

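/*
 * Free all preallocated event buffers, all subscriptions and the event
 * state itself. Safe to call when v4l2_event_init() never ran or
 * failed: fh->events is then NULL and this is a no-op.
 */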
void v4l2_event_free(struct v4l2_fh *fh)
{
	struct v4l2_events *events = fh->events;

	if (!events)
		return;

	list_kfree(&events->free, struct v4l2_kevent, list);
	list_kfree(&events->available, struct v4l2_kevent, list);
	list_kfree(&events->subscribed, struct v4l2_subscribed_event, list);

	kfree(events);
	fh->events = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_event_free);

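/*
 * Copy the oldest available event to *event and return its buffer to
 * the free list, all under the fh_lock. The pending count reported to
 * the caller is the number of events still waiting after this one.
 * Returns -ENOENT if no event is available.
 */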
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&events->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(events->navailable == 0);

	kev = list_first_entry(&events->available, struct v4l2_kevent, list);
	list_move(&kev->list, &events->free);
	events->navailable--;

	kev->event.pending = events->navailable;
	*event = kev->event;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

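/*
 * Dequeue one event, blocking until one arrives unless nonblocking is
 * set. The vdev serialization lock, if present, is dropped while
 * sleeping so other file operations can make progress. The dequeue is
 * retried on -ENOENT because another thread sharing this filehandle
 * may consume the event between the wakeup and the dequeue.
 */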
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	struct v4l2_events *events = fh->events;
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(events->wait,
					       events->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
	struct v4l2_fh *fh, u32 type)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &events->subscribed, list) {
		if (sev->type == type)
			return sev;
	}

	return NULL;
}

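/*
 * Deliver an event to every filehandle on the device that subscribed
 * to its type. The event is timestamped once, then copied into one
 * preallocated buffer per subscriber. Note that the sequence number is
 * bumped even when no free buffer is left, so a consumer can detect
 * dropped events as gaps in the sequence.
 *
 * Typical producer-side usage (a sketch, not part of this file):
 *
 *	struct v4l2_event ev = {
 *		.type = V4L2_EVENT_VSYNC,
 *	};
 *
 *	ev.u.vsync.field = V4L2_FIELD_TOP;
 *	v4l2_event_queue(vdev, &ev);
 */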
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list) {
		struct v4l2_events *events = fh->events;
		struct v4l2_kevent *kev;

		/* Are we subscribed? */
		if (!v4l2_event_subscribed(fh, ev->type))
			continue;

		/* Increase event sequence number on fh. */
		events->sequence++;

		/* Do we have any free events? */
		if (list_empty(&events->free))
			continue;

		/* Take one and fill it. */
		kev = list_first_entry(&events->free, struct v4l2_kevent, list);
		kev->event.type = ev->type;
		kev->event.u = ev->u;
		kev->event.timestamp = timestamp;
		kev->event.sequence = events->sequence;
		list_move_tail(&kev->list, &events->available);

		events->navailable++;

		wake_up_all(&events->wait);
	}

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

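/*
 * Return the number of events waiting on this filehandle; typically
 * called from a driver's poll() handler to decide whether to report
 * POLLPRI readiness.
 */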
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->events->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

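/*
 * Subscribe this filehandle to events of the given type. The new entry
 * is allocated before taking the fh_lock; if the type turns out to be
 * subscribed already, the unused allocation is freed and the call
 * still succeeds, so duplicate subscriptions are harmless no-ops.
 */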
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 struct v4l2_event_subscription *sub)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (fh->events == NULL) {
		WARN_ON(1);
		return -ENOMEM;
	}

	sev = kmalloc(sizeof(*sev), GFP_KERNEL);
	if (!sev)
		return -ENOMEM;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (v4l2_event_subscribed(fh, sub->type) == NULL) {
		INIT_LIST_HEAD(&sev->list);
		sev->type = sub->type;

		list_add(&sev->list, &events->subscribed);
		sev = NULL;
	}

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

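/*
 * Drop every subscription on this filehandle. Entries are unlinked one
 * at a time under the fh_lock and freed outside it.
 */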
static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&events->subscribed)) {
			sev = list_first_entry(&events->subscribed,
				       struct v4l2_subscribed_event, list);
			list_del(&sev->list);
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		kfree(sev);
	} while (sev);
}

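/*
 * Unsubscribe from one event type, or from everything when the type is
 * V4L2_EVENT_ALL. Unsubscribing from a type that was never subscribed
 * is not an error.
 */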
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type);
	if (sev != NULL)
		list_del(&sev->list);

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);