/*
 * uvc_queue.c -- USB Video Class driver - Buffers management
 *
 * Copyright (C) 2005-2010
 *     Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <media/videobuf2-vmalloc.h>

#include "uvcvideo.h"

/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * Video queues are initialized by uvc_queue_init(). The function performs
 * basic initialization of the uvc_video_queue struct and never fails.
 *
 * Video buffers are managed by videobuf2. The driver uses a mutex to protect
 * the videobuf2 queue operations by serializing calls to videobuf2 and a
 * spinlock to protect the IRQ queue that holds the buffers to be processed by
 * the driver.
 */

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

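/*
 * queue_setup() callback for videobuf2. Clamp the number of buffers to
 * UVC_MAX_VIDEO_BUFFERS and report a single plane sized after the
 * dwMaxVideoFrameSize value negotiated for the stream.
 */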
static int uvc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
                           unsigned int *nbuffers, unsigned int *nplanes,
                           unsigned int sizes[], void *alloc_ctxs[])
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
        struct uvc_streaming *stream =
                container_of(queue, struct uvc_streaming, queue);

        if (*nbuffers > UVC_MAX_VIDEO_BUFFERS)
                *nbuffers = UVC_MAX_VIDEO_BUFFERS;

        *nplanes = 1;

        sizes[0] = stream->ctrl.dwMaxVideoFrameSize;

        return 0;
}

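/*
 * buf_prepare() callback for videobuf2. Validate the payload size of output
 * buffers, reject buffers if the device has been disconnected, and cache the
 * plane address, size and initial byte count in the uvc_buffer structure.
 */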
static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);

        if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
            vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
                return -EINVAL;
        }

        if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
                return -ENODEV;

        buf->state = UVC_BUF_STATE_QUEUED;
        buf->error = 0;
        buf->mem = vb2_plane_vaddr(vb, 0);
        buf->length = vb2_plane_size(vb, 0);
        if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
                buf->bytesused = 0;
        else
                buf->bytesused = vb2_get_plane_payload(vb, 0);

        return 0;
}

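/*
 * buf_queue() callback for videobuf2. Add the buffer to the IRQ queue under
 * irqlock, or complete it immediately with an error if the device has been
 * disconnected.
 */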
static void uvc_buffer_queue(struct vb2_buffer *vb)
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
                list_add_tail(&buf->queue, &queue->irqqueue);
        } else {
                /* If the device is disconnected return the buffer to userspace
                 * directly. The next QBUF call will fail with -ENODEV.
                 */
                buf->state = UVC_BUF_STATE_ERROR;
                vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
        }

        spin_unlock_irqrestore(&queue->irqlock, flags);
}

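/*
 * buf_finish() callback for videobuf2. Update the buffer timestamp from the
 * stream clock through uvc_video_clock_update() before the buffer is handed
 * back to userspace.
 */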
static int uvc_buffer_finish(struct vb2_buffer *vb)
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct uvc_streaming *stream =
                container_of(queue, struct uvc_streaming, queue);
        struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);

        uvc_video_clock_update(stream, &vb->v4l2_buf, buf);
        return 0;
}

static struct vb2_ops uvc_queue_qops = {
        .queue_setup = uvc_queue_setup,
        .buf_prepare = uvc_buffer_prepare,
        .buf_queue = uvc_buffer_queue,
        .buf_finish = uvc_buffer_finish,
};

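/*
 * Initialize the videobuf2 queue, the serialization mutex, the IRQ spinlock
 * and the IRQ buffer list. The UVC_QUEUE_DROP_CORRUPTED flag is set when the
 * caller requests that corrupted frames be dropped instead of being returned
 * to userspace.
 */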
void uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
                    int drop_corrupted)
{
        queue->queue.type = type;
        queue->queue.io_modes = VB2_MMAP | VB2_USERPTR;
        queue->queue.drv_priv = queue;
        queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
        queue->queue.ops = &uvc_queue_qops;
        queue->queue.mem_ops = &vb2_vmalloc_memops;
        vb2_queue_init(&queue->queue);

        mutex_init(&queue->mutex);
        spin_lock_init(&queue->irqlock);
        INIT_LIST_HEAD(&queue->irqqueue);
        queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;
}

/* -----------------------------------------------------------------------------
 * V4L2 queue operations
 */

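/*
 * Allocate video buffers (VIDIOC_REQBUFS). Returns the number of buffers
 * actually allocated on success or a negative error code, with the call to
 * videobuf2 serialized by the queue mutex.
 */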
int uvc_alloc_buffers(struct uvc_video_queue *queue,
                      struct v4l2_requestbuffers *rb)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_reqbufs(&queue->queue, rb);
        mutex_unlock(&queue->mutex);

        return ret ? ret : rb->count;
}

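/*
 * Free all video buffers by releasing the videobuf2 queue.
 */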
void uvc_free_buffers(struct uvc_video_queue *queue)
{
        mutex_lock(&queue->mutex);
        vb2_queue_release(&queue->queue);
        mutex_unlock(&queue->mutex);
}

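/*
 * Query the state of a video buffer (VIDIOC_QUERYBUF).
 */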
int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_querybuf(&queue->queue, buf);
        mutex_unlock(&queue->mutex);

        return ret;
}

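/*
 * Queue a video buffer for capture or output (VIDIOC_QBUF).
 */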
int uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_qbuf(&queue->queue, buf);
        mutex_unlock(&queue->mutex);

        return ret;
}

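/*
 * Dequeue a completed video buffer (VIDIOC_DQBUF), optionally without
 * blocking when no buffer is ready.
 */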
int uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
                       int nonblocking)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_dqbuf(&queue->queue, buf, nonblocking);
        mutex_unlock(&queue->mutex);

        return ret;
}

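/*
 * Map video buffers to userspace through the videobuf2 mmap() handler.
 */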
int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_mmap(&queue->queue, vma);
        mutex_unlock(&queue->mutex);

        return ret;
}

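/*
 * Poll the video queue and return the poll events mask reported by
 * videobuf2.
 */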
unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
                            poll_table *wait)
{
        unsigned int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_poll(&queue->queue, file, wait);
        mutex_unlock(&queue->mutex);

        return ret;
}

/* -----------------------------------------------------------------------------
 *
 */

/*
 * Check if buffers have been allocated.
 */
int uvc_queue_allocated(struct uvc_video_queue *queue)
{
        int allocated;

        mutex_lock(&queue->mutex);
        allocated = vb2_is_busy(&queue->queue);
        mutex_unlock(&queue->mutex);

        return allocated;
}

#ifndef CONFIG_MMU
/*
 * Get unmapped area.
 *
 * No-MMU architectures need this function to make mmap() work correctly.
 */
unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
                                          unsigned long pgoff)
{
        struct uvc_buffer *buffer;
        unsigned int i;
        unsigned long ret;

        mutex_lock(&queue->mutex);
        for (i = 0; i < queue->count; ++i) {
                buffer = &queue->buffer[i];
                if ((buffer->buf.m.offset >> PAGE_SHIFT) == pgoff)
                        break;
        }
        if (i == queue->count) {
                ret = -EINVAL;
                goto done;
        }
        /* Return the kernel virtual address of the matching buffer. */
        ret = (unsigned long)buffer->mem;
done:
        mutex_unlock(&queue->mutex);
        return ret;
}
#endif

/*
 * Enable or disable the video buffers queue.
 *
 * The queue must be enabled before starting video acquisition and must be
 * disabled after stopping it. This ensures that the video buffers queue
 * state can be properly initialized before buffers are accessed from the
 * interrupt handler.
 *
 * Enabling the video queue returns -EBUSY if the queue is already enabled.
 *
 * Disabling the video queue cancels the queue and removes all buffers from
 * the main queue.
 *
 * This function can't be called from interrupt context. Use
 * uvc_queue_cancel() instead.
 */
int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
{
        unsigned long flags;
        int ret;

        mutex_lock(&queue->mutex);
        if (enable) {
                ret = vb2_streamon(&queue->queue, queue->queue.type);
                if (ret < 0)
                        goto done;

                queue->buf_used = 0;
        } else {
                ret = vb2_streamoff(&queue->queue, queue->queue.type);
                if (ret < 0)
                        goto done;

                spin_lock_irqsave(&queue->irqlock, flags);
                INIT_LIST_HEAD(&queue->irqqueue);
                spin_unlock_irqrestore(&queue->irqlock, flags);
        }

done:
        mutex_unlock(&queue->mutex);
        return ret;
}

/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the irq queue as erroneous,
 * wakes them up and removes them from the queue.
 *
 * If the disconnect parameter is set, further calls to uvc_queue_buffer will
 * fail with -ENODEV.
 *
 * This function acquires the irq spinlock and can be called from interrupt
 * context.
 */
void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
        struct uvc_buffer *buf;
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        while (!list_empty(&queue->irqqueue)) {
                buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
                                       queue);
                list_del(&buf->queue);
                buf->state = UVC_BUF_STATE_ERROR;
                vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
        }
        /* This must be protected by the irqlock spinlock to avoid race
         * conditions between uvc_buffer_queue and the disconnection event that
         * could result in an interruptible wait in uvc_dequeue_buffer. Do not
         * blindly replace this logic by checking for the UVC_QUEUE_DISCONNECTED
         * state outside the queue code.
         */
        if (disconnect)
                queue->flags |= UVC_QUEUE_DISCONNECTED;
        spin_unlock_irqrestore(&queue->irqlock, flags);
}

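/*
 * Complete the current buffer and return the next buffer on the IRQ queue,
 * or NULL if the queue is empty. If the UVC_QUEUE_DROP_CORRUPTED flag is set
 * and the buffer is marked as erroneous, the buffer is requeued in place
 * instead of being handed back to userspace.
 */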
struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
                                         struct uvc_buffer *buf)
{
        struct uvc_buffer *nextbuf;
        unsigned long flags;

        if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
                buf->error = 0;
                buf->state = UVC_BUF_STATE_QUEUED;
                buf->bytesused = 0;
                vb2_set_plane_payload(&buf->buf, 0, 0);
                return buf;
        }

        spin_lock_irqsave(&queue->irqlock, flags);
        list_del(&buf->queue);
        if (!list_empty(&queue->irqqueue))
                nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
                                           queue);
        else
                nextbuf = NULL;
        spin_unlock_irqrestore(&queue->irqlock, flags);

        buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
        vb2_set_plane_payload(&buf->buf, 0, buf->bytesused);
        vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE);

        return nextbuf;
}