/*
 * videobuf2-core.c - video buffer 2 core framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * The vb2_thread implementation was based on code from videobuf-dvb.c:
 *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mc.h>

#include <trace/events/vb2.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(q, level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			pr_info("[%s] %s: " fmt, (q)->name, __func__,	\
				## arg);				\
	} while (0)

#ifdef CONFIG_VIDEO_ADV_DEBUG

/*
 * If advanced debugging is on, then count how often each op is called
 * successfully; the counters are kept either per-buffer or per-queue.
 *
 * This makes it easy to check that the 'init' and 'cleanup'
 * (and variations thereof) stay balanced.
 */

#define log_memop(vb, op)						\
	dprintk((vb)->vb2_queue, 2, "call_memop(%d, %s)%s\n",		\
		(vb)->index, #op,					\
		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)")

#define call_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	int err;							\
									\
	log_memop(vb, op);						\
	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0;		\
	if (!err)							\
		(vb)->cnt_mem_ ## op++;					\
	err;								\
})

#define call_ptr_memop(op, vb, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	void *ptr;							\
									\
	log_memop(vb, op);						\
	ptr = _q->mem_ops->op ? _q->mem_ops->op(vb, args) : NULL;	\
	if (!IS_ERR_OR_NULL(ptr))					\
		(vb)->cnt_mem_ ## op++;					\
	ptr;								\
})

#define call_void_memop(vb, op, args...)				\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
									\
	log_memop(vb, op);						\
	if (_q->mem_ops->op)						\
		_q->mem_ops->op(args);					\
	(vb)->cnt_mem_ ## op++;						\
})

#define log_qop(q, op)							\
	dprintk(q, 2, "call_qop(%s)%s\n", #op,				\
		(q)->ops->op ? "" : " (nop)")

#define call_qop(q, op, args...)					\
({									\
	int err;							\
									\
	log_qop(q, op);							\
	err = (q)->ops->op ? (q)->ops->op(args) : 0;			\
	if (!err)							\
		(q)->cnt_ ## op++;					\
	err;								\
})

#define call_void_qop(q, op, args...)					\
({									\
	log_qop(q, op);							\
	if ((q)->ops->op)						\
		(q)->ops->op(args);					\
	(q)->cnt_ ## op++;						\
})

#define log_vb_qop(vb, op, args...)					\
	dprintk((vb)->vb2_queue, 2, "call_vb_qop(%d, %s)%s\n",		\
		(vb)->index, #op,					\
		(vb)->vb2_queue->ops->op ? "" : " (nop)")

#define call_vb_qop(vb, op, args...)					\
({									\
	int err;							\
									\
	log_vb_qop(vb, op);						\
	err = (vb)->vb2_queue->ops->op ?				\
		(vb)->vb2_queue->ops->op(args) : 0;			\
	if (!err)							\
		(vb)->cnt_ ## op++;					\
	err;								\
})

#define call_void_vb_qop(vb, op, args...)				\
({									\
	log_vb_qop(vb, op);						\
	if ((vb)->vb2_queue->ops->op)					\
		(vb)->vb2_queue->ops->op(args);				\
	(vb)->cnt_ ## op++;						\
})

#else

#define call_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : 0)

#define call_ptr_memop(op, vb, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(vb, args) : NULL)

#define call_void_memop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->mem_ops->op)			\
			(vb)->vb2_queue->mem_ops->op(args);		\
	} while (0)

#define call_qop(q, op, args...)					\
	((q)->ops->op ? (q)->ops->op(args) : 0)

#define call_void_qop(q, op, args...)					\
	do {								\
		if ((q)->ops->op)					\
			(q)->ops->op(args);				\
	} while (0)

#define call_vb_qop(vb, op, args...)					\
	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)

#define call_void_vb_qop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->ops->op)				\
			(vb)->vb2_queue->ops->op(args);			\
	} while (0)

#endif

#define call_bufop(q, op, args...)					\
({									\
	int ret = 0;							\
	if (q && q->buf_ops && q->buf_ops->op)				\
		ret = q->buf_ops->op(args);				\
	ret;								\
})

#define call_void_bufop(q, op, args...)					\
({									\
	if (q && q->buf_ops && q->buf_ops->op)				\
		q->buf_ops->op(args);					\
})

static void __vb2_queue_cancel(struct vb2_queue *q);
static void __enqueue_in_driver(struct vb2_buffer *vb);

static const char *vb2_state_name(enum vb2_buffer_state s)
{
	static const char * const state_names[] = {
		[VB2_BUF_STATE_DEQUEUED] = "dequeued",
		[VB2_BUF_STATE_IN_REQUEST] = "in request",
		[VB2_BUF_STATE_PREPARING] = "preparing",
		[VB2_BUF_STATE_QUEUED] = "queued",
		[VB2_BUF_STATE_ACTIVE] = "active",
		[VB2_BUF_STATE_DONE] = "done",
		[VB2_BUF_STATE_ERROR] = "error",
	};

	if ((unsigned int)(s) < ARRAY_SIZE(state_names))
		return state_names[s];
	return "unknown";
}

/*
 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
 */
static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	int plane;
	int ret = -ENOMEM;

	/*
	 * Allocate memory for all planes in this buffer.
	 * NOTE: mmapped areas should be page aligned
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Memops alloc requires size to be page aligned. */
		unsigned long size = PAGE_ALIGN(vb->planes[plane].length);

		/* Did it wrap around? */
		if (size < vb->planes[plane].length)
			goto free;

		mem_priv = call_ptr_memop(alloc,
					  vb,
					  q->alloc_devs[plane] ? : q->dev,
					  size);
		if (IS_ERR_OR_NULL(mem_priv)) {
			if (mem_priv)
				ret = PTR_ERR(mem_priv);
			goto free;
		}

		/* Associate allocator private data with this plane */
		vb->planes[plane].mem_priv = mem_priv;
	}

	return 0;
free:
	/* Free already allocated memory if one of the allocations failed */
	for (; plane > 0; --plane) {
		call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
		vb->planes[plane - 1].mem_priv = NULL;
	}

	return ret;
}

/*
 * __vb2_buf_mem_free() - free memory of the given buffer
 */
static void __vb2_buf_mem_free(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		call_void_memop(vb, put, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		dprintk(vb->vb2_queue, 3, "freed plane %d of buffer %d\n",
			plane, vb->index);
	}
}

/*
 * __vb2_buf_userptr_put() - release userspace memory associated with
 * a USERPTR buffer
 */
static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
	}
}

/*
 * __vb2_plane_dmabuf_put() - release memory associated with
 * a DMABUF shared plane
 */
static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
{
	if (!p->mem_priv)
		return;

	if (p->dbuf_mapped)
		call_void_memop(vb, unmap_dmabuf, p->mem_priv);

	call_void_memop(vb, detach_dmabuf, p->mem_priv);
	dma_buf_put(p->dbuf);
	p->mem_priv = NULL;
	p->dbuf = NULL;
	p->dbuf_mapped = 0;
}

/*
 * __vb2_buf_dmabuf_put() - release memory associated with
 * a DMABUF shared buffer
 */
static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane)
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
}

/*
 * __vb2_buf_mem_prepare() - call ->prepare() on buffer's private memory
 * to sync caches
 */
static void __vb2_buf_mem_prepare(struct vb2_buffer *vb)
{
	unsigned int plane;

	if (vb->synced)
		return;

	vb->synced = 1;
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
}

/*
 * __vb2_buf_mem_finish() - call ->finish() on buffer's private memory
 * to sync caches
 */
static void __vb2_buf_mem_finish(struct vb2_buffer *vb)
{
	unsigned int plane;

	if (!vb->synced)
		return;

	vb->synced = 0;
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_void_memop(vb, finish, vb->planes[plane].mem_priv);
}

/*
 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
 * the buffer.
 */
static void __setup_offsets(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;
	unsigned long off = 0;

	if (vb->index) {
		struct vb2_buffer *prev = q->bufs[vb->index - 1];
		struct vb2_plane *p = &prev->planes[prev->num_planes - 1];

		off = PAGE_ALIGN(p->m.offset + p->length);
	}

	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].m.offset = off;

		dprintk(q, 3, "buffer %d, plane %d offset 0x%08lx\n",
				vb->index, plane, off);

		off += vb->planes[plane].length;
		off = PAGE_ALIGN(off);
	}
}
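
/*
 * A hypothetical userspace-side sketch of how these offsets are consumed:
 * after VIDIOC_QUERYBUF, a V4L2 application passes the per-plane "cookie"
 * offset set up above straight to mmap(). Field names follow the
 * multi-planar V4L2 API; 'video_fd' and 'p' are illustrative and error
 * handling is omitted:
 *
 *	void *mem = mmap(NULL, buf.m.planes[p].length,
 *			 PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 video_fd, buf.m.planes[p].m.mem_offset);
 *
 * vb2 later maps that offset back to the right buffer and plane when the
 * driver's mmap handler ends up in vb2_mmap().
 */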

static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb)
{
	/*
	 * DMA exporter should take care of cache syncs, so we can avoid
	 * explicit ->prepare()/->finish() syncs. For other ->memory types
	 * we always need ->prepare() and/or ->finish() cache sync.
	 */
	if (q->memory == VB2_MEMORY_DMABUF) {
		vb->skip_cache_sync_on_finish = 1;
		vb->skip_cache_sync_on_prepare = 1;
		return;
	}

	/*
	 * ->finish() cache sync can be avoided when queue direction is
	 * TO_DEVICE.
	 */
	if (q->dma_dir == DMA_TO_DEVICE)
		vb->skip_cache_sync_on_finish = 1;
}

/*
 * __vb2_queue_alloc() - allocate vb2 buffer structures and (for MMAP type)
 * video buffer memory for all buffers/planes on the queue and initialize the
 * queue
 *
 * Returns the number of buffers successfully allocated.
 */
static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
			     unsigned int num_buffers, unsigned int num_planes,
			     const unsigned plane_sizes[VB2_MAX_PLANES])
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;
	int ret;

	/* Ensure that q->num_buffers + num_buffers does not exceed VB2_MAX_FRAME */
	num_buffers = min_t(unsigned int, num_buffers,
			    VB2_MAX_FRAME - q->num_buffers);

	for (buffer = 0; buffer < num_buffers; ++buffer) {
		/* Allocate vb2 buffer structures */
		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
		if (!vb) {
			dprintk(q, 1, "memory alloc for buffer struct failed\n");
			break;
		}

		vb->state = VB2_BUF_STATE_DEQUEUED;
		vb->vb2_queue = q;
		vb->num_planes = num_planes;
		vb->index = q->num_buffers + buffer;
		vb->type = q->type;
		vb->memory = memory;
		init_buffer_cache_hints(q, vb);
		for (plane = 0; plane < num_planes; ++plane) {
			vb->planes[plane].length = plane_sizes[plane];
			vb->planes[plane].min_length = plane_sizes[plane];
		}
		call_void_bufop(q, init_buffer, vb);

		q->bufs[vb->index] = vb;

		/* Allocate video buffer memory for the MMAP type */
		if (memory == VB2_MEMORY_MMAP) {
			ret = __vb2_buf_mem_alloc(vb);
			if (ret) {
				dprintk(q, 1, "failed allocating memory for buffer %d\n",
					buffer);
				q->bufs[vb->index] = NULL;
				kfree(vb);
				break;
			}
			__setup_offsets(vb);
			/*
			 * Call the driver-provided buffer initialization
			 * callback, if given. An error in initialization
			 * results in queue setup failure.
			 */
			ret = call_vb_qop(vb, buf_init, vb);
			if (ret) {
				dprintk(q, 1, "buffer %d %p initialization failed\n",
					buffer, vb);
				__vb2_buf_mem_free(vb);
				q->bufs[vb->index] = NULL;
				kfree(vb);
				break;
			}
		}
	}

	dprintk(q, 3, "allocated %d buffers, %d plane(s) each\n",
		buffer, num_planes);

	return buffer;
}

/*
 * __vb2_free_mem() - release all video buffer memory for a given queue
 */
static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;
	struct vb2_buffer *vb;

	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		/* Free MMAP buffers or release USERPTR buffers */
		if (q->memory == VB2_MEMORY_MMAP)
			__vb2_buf_mem_free(vb);
		else if (q->memory == VB2_MEMORY_DMABUF)
			__vb2_buf_dmabuf_put(vb);
		else
			__vb2_buf_userptr_put(vb);
	}
}

/*
 * __vb2_queue_free() - free buffers at the end of the queue - video memory and
 * related information; if no buffers are left, return the queue to an
 * uninitialized state. Might be called even if the queue has already been freed.
 */
static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;

	/*
	 * Sanity check: when preparing a buffer the queue lock is released for
	 * a short while (see __buf_prepare for the details), which would allow
	 * a race with a reqbufs which can call this function. Removing the
	 * buffers from underneath __buf_prepare is obviously a bad idea, so we
	 * check if any of the buffers is in the state PREPARING, and if so we
	 * just return -EAGAIN.
	 */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		if (q->bufs[buffer] == NULL)
			continue;
		if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
			dprintk(q, 1, "preparing buffers, cannot free\n");
			return -EAGAIN;
		}
	}

	/* Call driver-provided cleanup function for each buffer, if provided */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];

		if (vb && vb->planes[0].mem_priv)
			call_void_vb_qop(vb, buf_cleanup, vb);
	}

	/* Release video buffer memory */
	__vb2_free_mem(q, buffers);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Check that all the calls were balanced during the life-time of this
	 * queue. If not (or if the debug level is 1 or up), then dump the
	 * counters to the kernel log.
	 */
	if (q->num_buffers) {
		bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
				  q->cnt_wait_prepare != q->cnt_wait_finish;

		if (unbalanced || debug) {
			pr_info("counters for queue %p:%s\n", q,
				unbalanced ? " UNBALANCED!" : "");
			pr_info("     setup: %u start_streaming: %u stop_streaming: %u\n",
				q->cnt_queue_setup, q->cnt_start_streaming,
				q->cnt_stop_streaming);
			pr_info("     wait_prepare: %u wait_finish: %u\n",
				q->cnt_wait_prepare, q->cnt_wait_finish);
		}
		q->cnt_queue_setup = 0;
		q->cnt_wait_prepare = 0;
		q->cnt_wait_finish = 0;
		q->cnt_start_streaming = 0;
		q->cnt_stop_streaming = 0;
	}
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];
		bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
				  vb->cnt_mem_prepare != vb->cnt_mem_finish ||
				  vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
				  vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
				  vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
				  vb->cnt_buf_queue != vb->cnt_buf_done ||
				  vb->cnt_buf_prepare != vb->cnt_buf_finish ||
				  vb->cnt_buf_init != vb->cnt_buf_cleanup;

		if (unbalanced || debug) {
			pr_info("   counters for queue %p, buffer %d:%s\n",
				q, buffer, unbalanced ? " UNBALANCED!" : "");
			pr_info("     buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
				vb->cnt_buf_init, vb->cnt_buf_cleanup,
				vb->cnt_buf_prepare, vb->cnt_buf_finish);
			pr_info("     buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n",
				vb->cnt_buf_out_validate, vb->cnt_buf_queue,
				vb->cnt_buf_done, vb->cnt_buf_request_complete);
			pr_info("     alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
				vb->cnt_mem_alloc, vb->cnt_mem_put,
				vb->cnt_mem_prepare, vb->cnt_mem_finish,
				vb->cnt_mem_mmap);
			pr_info("     get_userptr: %u put_userptr: %u\n",
				vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
			pr_info("     attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
				vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
				vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
			pr_info("     get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
				vb->cnt_mem_get_dmabuf,
				vb->cnt_mem_num_users,
				vb->cnt_mem_vaddr,
				vb->cnt_mem_cookie);
		}
	}
#endif

	/* Free vb2 buffers */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		kfree(q->bufs[buffer]);
		q->bufs[buffer] = NULL;
	}

	q->num_buffers -= buffers;
	if (!q->num_buffers) {
		q->memory = VB2_MEMORY_UNKNOWN;
		INIT_LIST_HEAD(&q->queued_list);
	}
	return 0;
}

bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		void *mem_priv = vb->planes[plane].mem_priv;

		/*
		 * If num_users() has not been provided, call_memop
		 * will return 0, apparently nobody cares about this
		 * case anyway. If num_users() returns more than 1,
		 * we are not the only user of the plane's memory.
		 */
		if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(vb2_buffer_in_use);

/*
 * __buffers_in_use() - return true if any buffers on the queue are in use and
 * the queue cannot be freed (by means of a REQBUFS(0) call)
 */
static bool __buffers_in_use(struct vb2_queue *q)
{
	unsigned int buffer;

	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		if (vb2_buffer_in_use(q, q->bufs[buffer]))
			return true;
	}
	return false;
}

void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
{
	call_void_bufop(q, fill_user_buffer, q->bufs[index], pb);
}
EXPORT_SYMBOL_GPL(vb2_core_querybuf);

/*
 * __verify_userptr_ops() - verify that all memory operations required for
 * USERPTR queue type have been provided
 */
static int __verify_userptr_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
	    !q->mem_ops->put_userptr)
		return -EINVAL;

	return 0;
}

/*
 * __verify_mmap_ops() - verify that all memory operations required for
 * MMAP queue type have been provided
 */
static int __verify_mmap_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
	    !q->mem_ops->put || !q->mem_ops->mmap)
		return -EINVAL;

	return 0;
}

/*
 * __verify_dmabuf_ops() - verify that all memory operations required for
 * DMABUF queue type have been provided
 */
static int __verify_dmabuf_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
	    !q->mem_ops->detach_dmabuf  || !q->mem_ops->map_dmabuf ||
	    !q->mem_ops->unmap_dmabuf)
		return -EINVAL;

	return 0;
}

int vb2_verify_memory_type(struct vb2_queue *q,
		enum vb2_memory memory, unsigned int type)
{
	if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR &&
	    memory != VB2_MEMORY_DMABUF) {
		dprintk(q, 1, "unsupported memory type\n");
		return -EINVAL;
	}

	if (type != q->type) {
		dprintk(q, 1, "requested type is incorrect\n");
		return -EINVAL;
	}

	/*
	 * Make sure all the required memory ops for given memory type
	 * are available.
	 */
	if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) {
		dprintk(q, 1, "MMAP for current setup unsupported\n");
		return -EINVAL;
	}

	if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
		dprintk(q, 1, "USERPTR for current setup unsupported\n");
		return -EINVAL;
	}

	if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
		dprintk(q, 1, "DMABUF for current setup unsupported\n");
		return -EINVAL;
	}

	/*
	 * Place the busy tests at the end: -EBUSY can be ignored when
	 * create_bufs is called with count == 0, but count == 0 should still
	 * do the memory and type validation.
	 */
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(vb2_verify_memory_type);

static void set_queue_coherency(struct vb2_queue *q, bool non_coherent_mem)
{
	q->non_coherent_mem = 0;

	if (!vb2_queue_allows_cache_hints(q))
		return;
	q->non_coherent_mem = non_coherent_mem;
}

static bool verify_coherency_flags(struct vb2_queue *q, bool non_coherent_mem)
{
	if (non_coherent_mem != q->non_coherent_mem) {
		dprintk(q, 1, "memory coherency model mismatch\n");
		return false;
	}
	return true;
}
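
/*
 * A hedged userspace-side example of how the coherency model above gets
 * selected: on kernels where struct v4l2_requestbuffers exposes a flags
 * field, userspace can pass V4L2_MEMORY_FLAG_NON_COHERENT at REQBUFS time,
 * which arrives here as the 'non_coherent_mem' argument. It is only
 * honoured when vb2_queue_allows_cache_hints() is true, i.e. (as of this
 * writing) when the driver set q->allow_cache_hints and the queue uses
 * MMAP memory:
 *
 *	struct v4l2_requestbuffers req = {
 *		.count = 4,
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP,
 *		.flags = V4L2_MEMORY_FLAG_NON_COHERENT,
 *	};
 *
 *	ioctl(fd, VIDIOC_REQBUFS, &req);
 */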

int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
		     unsigned int flags, unsigned int *count)
{
	unsigned int num_buffers, allocated_buffers, num_planes = 0;
	unsigned plane_sizes[VB2_MAX_PLANES] = { };
	bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
	unsigned int i;
	int ret;

	if (q->streaming) {
		dprintk(q, 1, "streaming active\n");
		return -EBUSY;
	}

	if (q->waiting_in_dqbuf && *count) {
		dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
		return -EBUSY;
	}

	if (*count == 0 || q->num_buffers != 0 ||
	    (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) ||
	    !verify_coherency_flags(q, non_coherent_mem)) {
		/*
		 * We already have buffers allocated, so first check if they
		 * are not in use and can be freed.
		 */
		mutex_lock(&q->mmap_lock);
		if (debug && q->memory == VB2_MEMORY_MMAP &&
		    __buffers_in_use(q))
			dprintk(q, 1, "memory in use, orphaning buffers\n");

		/*
		 * Call queue_cancel to clean up any buffers in the
		 * QUEUED state which is possible if buffers were prepared or
		 * queued without ever calling STREAMON.
		 */
		__vb2_queue_cancel(q);
		ret = __vb2_queue_free(q, q->num_buffers);
		mutex_unlock(&q->mmap_lock);
		if (ret)
			return ret;

		/*
		 * In case of REQBUFS(0) return immediately without calling
		 * driver's queue_setup() callback and allocating resources.
		 */
		if (*count == 0)
			return 0;
	}

	/*
	 * Make sure the requested values and current defaults are sane.
	 */
	WARN_ON(q->min_buffers_needed > VB2_MAX_FRAME);
	num_buffers = max_t(unsigned int, *count, q->min_buffers_needed);
	num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME);
	memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
	/*
	 * Set this now to ensure that drivers see the correct q->memory value
	 * in the queue_setup op.
	 */
	mutex_lock(&q->mmap_lock);
	q->memory = memory;
	mutex_unlock(&q->mmap_lock);
	set_queue_coherency(q, non_coherent_mem);

	/*
	 * Ask the driver how many buffers and planes per buffer it requires.
	 * Driver also sets the size and allocator context for each plane.
	 */
	ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
		       plane_sizes, q->alloc_devs);
	if (ret)
		goto error;

	/* Check that driver has set sane values */
	if (WARN_ON(!num_planes)) {
		ret = -EINVAL;
		goto error;
	}

	for (i = 0; i < num_planes; i++)
		if (WARN_ON(!plane_sizes[i])) {
			ret = -EINVAL;
			goto error;
		}

	/* Finally, allocate buffers and video memory */
	allocated_buffers =
		__vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes);
	if (allocated_buffers == 0) {
		dprintk(q, 1, "memory allocation failed\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * There is no point in continuing if we can't allocate the minimum
	 * number of buffers needed by this vb2_queue.
	 */
	if (allocated_buffers < q->min_buffers_needed)
		ret = -ENOMEM;

	/*
	 * Check if driver can handle the allocated number of buffers.
	 */
	if (!ret && allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;
		/*
		 * num_planes is set by the previous queue_setup(), but a
		 * non-zero value signals to queue_setup() that it is being
		 * called from create_bufs(), so zero it here to indicate
		 * the reqbufs() case.
		 */
		num_planes = 0;

		ret = call_qop(q, queue_setup, q, &num_buffers,
			       &num_planes, plane_sizes, q->alloc_devs);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	mutex_lock(&q->mmap_lock);
	q->num_buffers = allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers and it will reset q->memory to
		 * VB2_MEMORY_UNKNOWN.
		 */
		__vb2_queue_free(q, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
		return ret;
	}
	mutex_unlock(&q->mmap_lock);

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	*count = allocated_buffers;
	q->waiting_for_buffers = !q->is_output;

	return 0;

error:
	mutex_lock(&q->mmap_lock);
	q->memory = VB2_MEMORY_UNKNOWN;
	mutex_unlock(&q->mmap_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
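
/*
 * A minimal driver-side queue_setup() matching the negotiation performed by
 * vb2_core_reqbufs() above; a sketch only, with MY_IMAGE_SIZE standing in
 * for a driver-specific frame size. A non-zero *num_planes on entry means
 * the call came from create_bufs() and the driver should only validate the
 * sizes it was given; on the reqbufs() path it fills in the plane count and
 * sizes and may also adjust *num_buffers:
 *
 *	static int my_queue_setup(struct vb2_queue *q,
 *				  unsigned int *num_buffers,
 *				  unsigned int *num_planes,
 *				  unsigned int sizes[],
 *				  struct device *alloc_devs[])
 *	{
 *		if (*num_planes)
 *			return sizes[0] < MY_IMAGE_SIZE ? -EINVAL : 0;
 *
 *		*num_planes = 1;
 *		sizes[0] = MY_IMAGE_SIZE;
 *		return 0;
 *	}
 */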

int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
			 unsigned int flags, unsigned int *count,
			 unsigned int requested_planes,
			 const unsigned int requested_sizes[])
{
	unsigned int num_planes = 0, num_buffers, allocated_buffers;
	unsigned plane_sizes[VB2_MAX_PLANES] = { };
	bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
	bool no_previous_buffers = !q->num_buffers;
	int ret;

	if (q->num_buffers == VB2_MAX_FRAME) {
		dprintk(q, 1, "maximum number of buffers already allocated\n");
		return -ENOBUFS;
	}

	if (no_previous_buffers) {
		if (q->waiting_in_dqbuf && *count) {
			dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
			return -EBUSY;
		}
		memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
		/*
		 * Set this now to ensure that drivers see the correct q->memory
		 * value in the queue_setup op.
		 */
		mutex_lock(&q->mmap_lock);
		q->memory = memory;
		mutex_unlock(&q->mmap_lock);
		q->waiting_for_buffers = !q->is_output;
		set_queue_coherency(q, non_coherent_mem);
	} else {
		if (q->memory != memory) {
			dprintk(q, 1, "memory model mismatch\n");
			return -EINVAL;
		}
		if (!verify_coherency_flags(q, non_coherent_mem))
			return -EINVAL;
	}

	num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);

	if (requested_planes && requested_sizes) {
		num_planes = requested_planes;
		memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes));
	}

	/*
	 * Ask the driver whether the requested number of buffers, planes per
	 * buffer and their sizes are acceptable
	 */
	ret = call_qop(q, queue_setup, q, &num_buffers,
		       &num_planes, plane_sizes, q->alloc_devs);
	if (ret)
		goto error;

	/* Finally, allocate buffers and video memory */
	allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
				num_planes, plane_sizes);
	if (allocated_buffers == 0) {
		dprintk(q, 1, "memory allocation failed\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Check if the driver can handle the number of buffers allocated so far.
	 */
	if (allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		/*
		 * q->num_buffers contains the total number of buffers that the
		 * queue driver has set up
		 */
		ret = call_qop(q, queue_setup, q, &num_buffers,
			       &num_planes, plane_sizes, q->alloc_devs);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	mutex_lock(&q->mmap_lock);
	q->num_buffers += allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers and it will reset q->memory to
		 * VB2_MEMORY_UNKNOWN.
		 */
		__vb2_queue_free(q, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
		return -ENOMEM;
	}
	mutex_unlock(&q->mmap_lock);

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	*count = allocated_buffers;

	return 0;

error:
	if (no_previous_buffers) {
		mutex_lock(&q->mmap_lock);
		q->memory = VB2_MEMORY_UNKNOWN;
		mutex_unlock(&q->mmap_lock);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_create_bufs);

void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
{
	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
		return NULL;

	return call_ptr_memop(vaddr, vb, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_vaddr);

void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
{
	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
		return NULL;

	return call_ptr_memop(cookie, vb, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_cookie);

void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned long flags;

	if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
		return;

	if (WARN_ON(state != VB2_BUF_STATE_DONE &&
		    state != VB2_BUF_STATE_ERROR &&
		    state != VB2_BUF_STATE_QUEUED))
		state = VB2_BUF_STATE_ERROR;

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Although this is not a callback, it still does have to balance
	 * with the buf_queue op. So update this counter manually.
	 */
	vb->cnt_buf_done++;
#endif
	dprintk(q, 4, "done processing on buffer %d, state: %s\n",
		vb->index, vb2_state_name(state));

	if (state != VB2_BUF_STATE_QUEUED)
		__vb2_buf_mem_finish(vb);

	spin_lock_irqsave(&q->done_lock, flags);
	if (state == VB2_BUF_STATE_QUEUED) {
		vb->state = VB2_BUF_STATE_QUEUED;
	} else {
		/* Add the buffer to the done buffers list */
		list_add_tail(&vb->done_entry, &q->done_list);
		vb->state = state;
	}
	atomic_dec(&q->owned_by_drv_count);

	if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) {
		media_request_object_unbind(&vb->req_obj);
		media_request_object_put(&vb->req_obj);
	}

	spin_unlock_irqrestore(&q->done_lock, flags);

	trace_vb2_buf_done(q, vb);

	switch (state) {
	case VB2_BUF_STATE_QUEUED:
		return;
	default:
		/* Inform any processes that may be waiting for buffers */
		wake_up(&q->done_wq);
		break;
	}
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
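
/*
 * A hypothetical driver-side sketch of the usual completion path: from the
 * device interrupt handler, finish the buffer that was handed over in
 * buf_queue (it must be in the ACTIVE state by then). 'my_dev', 'ok' and
 * 'bytes_captured' are illustrative only:
 *
 *	struct vb2_buffer *vb = my_dev->cur_buf;
 *
 *	vb->timestamp = ktime_get_ns();
 *	vb2_set_plane_payload(vb, 0, bytes_captured);
 *	vb2_buffer_done(vb, ok ? VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR);
 */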

void vb2_discard_done(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	unsigned long flags;

	spin_lock_irqsave(&q->done_lock, flags);
	list_for_each_entry(vb, &q->done_list, done_entry)
		vb->state = VB2_BUF_STATE_ERROR;
	spin_unlock_irqrestore(&q->done_lock, flags);
}
EXPORT_SYMBOL_GPL(vb2_discard_done);

/*
 * __prepare_mmap() - prepare an MMAP buffer
 */
static int __prepare_mmap(struct vb2_buffer *vb)
{
	int ret = 0;

	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
			 vb, vb->planes);
	return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
}

/*
 * __prepare_userptr() - prepare a USERPTR buffer
 */
static int __prepare_userptr(struct vb2_buffer *vb)
{
	struct vb2_plane planes[VB2_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret = 0;
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
			 vb, planes);
	if (ret)
		return ret;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if already verified */
		if (vb->planes[plane].m.userptr &&
			vb->planes[plane].m.userptr == planes[plane].m.userptr
			&& vb->planes[plane].length == planes[plane].length)
			continue;

		dprintk(q, 3, "userspace address for plane %d changed, reacquiring memory\n",
			plane);

		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < vb->planes[plane].min_length) {
			dprintk(q, 1, "provided buffer size %u is less than setup size %u for plane %d\n",
						planes[plane].length,
						vb->planes[plane].min_length,
						plane);
			ret = -EINVAL;
			goto err;
		}

		/* Release previously acquired memory if present */
		if (vb->planes[plane].mem_priv) {
			if (!reacquired) {
				reacquired = true;
				vb->copied_timestamp = 0;
				call_void_vb_qop(vb, buf_cleanup, vb);
			}
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		}

		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].bytesused = 0;
		vb->planes[plane].length = 0;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].data_offset = 0;

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(get_userptr,
					  vb,
					  q->alloc_devs[plane] ? : q->dev,
					  planes[plane].m.userptr,
					  planes[plane].length);
		if (IS_ERR(mem_priv)) {
			dprintk(q, 1, "failed acquiring userspace memory for plane %d\n",
				plane);
			ret = PTR_ERR(mem_priv);
			goto err;
		}
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].bytesused = planes[plane].bytesused;
		vb->planes[plane].length = planes[plane].length;
		vb->planes[plane].m.userptr = planes[plane].m.userptr;
		vb->planes[plane].data_offset = planes[plane].data_offset;
	}

	if (reacquired) {
		/*
		 * One or more planes changed, so we must call buf_init to do
		 * the driver-specific initialization on the newly acquired
		 * buffer, if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(q, 1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(q, 1, "buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr,
				vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].length = 0;
	}

	return ret;
}

/*
 * __prepare_dmabuf() - prepare a DMABUF buffer
 */
static int __prepare_dmabuf(struct vb2_buffer *vb)
{
	struct vb2_plane planes[VB2_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret = 0;
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
			 vb, planes);
	if (ret)
		return ret;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);

		if (IS_ERR_OR_NULL(dbuf)) {
			dprintk(q, 1, "invalid dmabuf fd for plane %d\n",
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* use DMABUF size if length is not provided */
		if (planes[plane].length == 0)
			planes[plane].length = dbuf->size;

		if (planes[plane].length < vb->planes[plane].min_length) {
			dprintk(q, 1, "invalid dmabuf length %u for plane %d, minimum length %u\n",
				planes[plane].length, plane,
				vb->planes[plane].min_length);
			dma_buf_put(dbuf);
			ret = -EINVAL;
			goto err;
		}

		/* Skip the plane if already verified */
		if (dbuf == vb->planes[plane].dbuf &&
			vb->planes[plane].length == planes[plane].length) {
			dma_buf_put(dbuf);
			continue;
		}

		dprintk(q, 3, "buffer for plane %d changed\n", plane);

		if (!reacquired) {
			reacquired = true;
			vb->copied_timestamp = 0;
			call_void_vb_qop(vb, buf_cleanup, vb);
		}

		/* Release previously acquired memory if present */
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
		vb->planes[plane].bytesused = 0;
		vb->planes[plane].length = 0;
		vb->planes[plane].m.fd = 0;
		vb->planes[plane].data_offset = 0;

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(attach_dmabuf,
					  vb,
					  q->alloc_devs[plane] ? : q->dev,
					  dbuf,
					  planes[plane].length);
		if (IS_ERR(mem_priv)) {
			dprintk(q, 1, "failed to attach dmabuf\n");
			ret = PTR_ERR(mem_priv);
			dma_buf_put(dbuf);
			goto err;
		}

		vb->planes[plane].dbuf = dbuf;
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * This pins the buffer(s) with dma_buf_map_attachment(). It's done
	 * here, while queueing the buffer(s), instead of just before the DMA,
	 * so userspace knows sooner rather than later if the dma-buf map
	 * fails.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].dbuf_mapped)
			continue;

		ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
		if (ret) {
			dprintk(q, 1, "failed to map dmabuf for plane %d\n",
				plane);
			goto err;
		}
		vb->planes[plane].dbuf_mapped = 1;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].bytesused = planes[plane].bytesused;
		vb->planes[plane].length = planes[plane].length;
		vb->planes[plane].m.fd = planes[plane].m.fd;
		vb->planes[plane].data_offset = planes[plane].data_offset;
	}

	if (reacquired) {
		/*
		 * Call driver-specific initialization on the newly acquired buffer,
		 * if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(q, 1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(q, 1, "buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	__vb2_buf_dmabuf_put(vb);

	return ret;
}

/*
 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
 */
static void __enqueue_in_driver(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;

	vb->state = VB2_BUF_STATE_ACTIVE;
	atomic_inc(&q->owned_by_drv_count);

	trace_vb2_buf_queue(q, vb);

	call_void_vb_qop(vb, buf_queue, vb);
}

static int __buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	enum vb2_buffer_state orig_state = vb->state;
	int ret;

	if (q->error) {
		dprintk(q, 1, "fatal error occurred on queue\n");
		return -EIO;
	}

	if (vb->prepared)
		return 0;
	WARN_ON(vb->synced);

	if (q->is_output) {
		ret = call_vb_qop(vb, buf_out_validate, vb);
		if (ret) {
			dprintk(q, 1, "buffer validation failed\n");
			return ret;
		}
	}

	vb->state = VB2_BUF_STATE_PREPARING;

	switch (q->memory) {
	case VB2_MEMORY_MMAP:
		ret = __prepare_mmap(vb);
		break;
	case VB2_MEMORY_USERPTR:
		ret = __prepare_userptr(vb);
		break;
	case VB2_MEMORY_DMABUF:
		ret = __prepare_dmabuf(vb);
		break;
	default:
		WARN(1, "Invalid queue type\n");
		ret = -EINVAL;
		break;
	}

	if (ret) {
		dprintk(q, 1, "buffer preparation failed: %d\n", ret);
		vb->state = orig_state;
		return ret;
	}

	__vb2_buf_mem_prepare(vb);
	vb->prepared = 1;
	vb->state = orig_state;

	return 0;
}

static int vb2_req_prepare(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
	int ret;

	if (WARN_ON(vb->state != VB2_BUF_STATE_IN_REQUEST))
		return -EINVAL;

	mutex_lock(vb->vb2_queue->lock);
	ret = __buf_prepare(vb);
	mutex_unlock(vb->vb2_queue->lock);
	return ret;
}

static void __vb2_dqbuf(struct vb2_buffer *vb);

static void vb2_req_unprepare(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);

	mutex_lock(vb->vb2_queue->lock);
	__vb2_dqbuf(vb);
	vb->state = VB2_BUF_STATE_IN_REQUEST;
	mutex_unlock(vb->vb2_queue->lock);
	WARN_ON(!vb->req_obj.req);
}

int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
		  struct media_request *req);

static void vb2_req_queue(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
	int err;

	mutex_lock(vb->vb2_queue->lock);
	/*
	 * There is no method to propagate an error from vb2_core_qbuf(),
	 * so if this returns a non-0 value, then WARN.
	 *
	 * The only exception is -EIO which is returned if q->error is
	 * set. We just ignore that, and expect this will be caught the
	 * next time vb2_req_prepare() is called.
	 */
	err = vb2_core_qbuf(vb->vb2_queue, vb->index, NULL, NULL);
	WARN_ON_ONCE(err && err != -EIO);
	mutex_unlock(vb->vb2_queue->lock);
}

static void vb2_req_unbind(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);

	if (vb->state == VB2_BUF_STATE_IN_REQUEST)
		call_void_bufop(vb->vb2_queue, init_buffer, vb);
}

static void vb2_req_release(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);

	if (vb->state == VB2_BUF_STATE_IN_REQUEST) {
		vb->state = VB2_BUF_STATE_DEQUEUED;
		if (vb->request)
			media_request_put(vb->request);
		vb->request = NULL;
	}
}

static const struct media_request_object_ops vb2_core_req_ops = {
	.prepare = vb2_req_prepare,
	.unprepare = vb2_req_unprepare,
	.queue = vb2_req_queue,
	.unbind = vb2_req_unbind,
	.release = vb2_req_release,
};

bool vb2_request_object_is_buffer(struct media_request_object *obj)
{
	return obj->ops == &vb2_core_req_ops;
}
EXPORT_SYMBOL_GPL(vb2_request_object_is_buffer);

unsigned int vb2_request_buffer_cnt(struct media_request *req)
{
	struct media_request_object *obj;
	unsigned long flags;
	unsigned int buffer_cnt = 0;

	spin_lock_irqsave(&req->lock, flags);
	list_for_each_entry(obj, &req->objects, list)
		if (vb2_request_object_is_buffer(obj))
			buffer_cnt++;
	spin_unlock_irqrestore(&req->lock, flags);

	return buffer_cnt;
}
EXPORT_SYMBOL_GPL(vb2_request_buffer_cnt);

int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
{
	struct vb2_buffer *vb;
	int ret;

	vb = q->bufs[index];
	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(q, 1, "invalid buffer state %s\n",
			vb2_state_name(vb->state));
		return -EINVAL;
	}
	if (vb->prepared) {
		dprintk(q, 1, "buffer already prepared\n");
		return -EINVAL;
	}

	ret = __buf_prepare(vb);
	if (ret)
		return ret;

	/* Fill buffer information for the userspace */
	call_void_bufop(q, fill_user_buffer, vb, pb);

	dprintk(q, 2, "prepare of buffer %d succeeded\n", vb->index);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);

/*
 * vb2_start_streaming() - Attempt to start streaming.
 * @q:		videobuf2 queue
 *
 * Attempt to start streaming. When this function is called there must be
 * at least q->min_buffers_needed buffers queued up (i.e. the minimum
 * number of buffers required for the DMA engine to function). If the
 * @start_streaming op fails it is supposed to return all the driver-owned
 * buffers back to vb2 in state QUEUED. Check if that happened and if
 * not warn and reclaim them forcefully.
 */
static int vb2_start_streaming(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	int ret;

	/*
	 * If any buffers were queued before streamon,
	 * we can now pass them to the driver for processing.
	 */
	list_for_each_entry(vb, &q->queued_list, queued_entry)
		__enqueue_in_driver(vb);

	/* Tell the driver to start streaming */
	q->start_streaming_called = 1;
	ret = call_qop(q, start_streaming, q,
		       atomic_read(&q->owned_by_drv_count));
	if (!ret)
		return 0;

	q->start_streaming_called = 0;

	dprintk(q, 1, "driver refused to start streaming\n");
	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * after a failed start_streaming(). See the start_streaming()
	 * documentation in videobuf2-core.h for more information on how
	 * buffers should be returned to vb2 in start_streaming().
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		unsigned i;

		/*
		 * Forcefully reclaim buffers if the driver did not
		 * correctly return them to vb2.
		 */
		for (i = 0; i < q->num_buffers; ++i) {
			vb = q->bufs[i];
			if (vb->state == VB2_BUF_STATE_ACTIVE)
				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
		}
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}
	/*
	 * If done_list is not empty, then start_streaming() didn't call
	 * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or
	 * STATE_DONE.
	 */
	WARN_ON(!list_empty(&q->done_list));
	return ret;
}
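
/*
 * A sketch (hypothetical driver code) of the contract enforced above: a
 * start_streaming() that fails must first hand back every buffer it was
 * given, in the QUEUED state, before returning the error. 'my_hw_start'
 * and 'my_return_all_buffers' are illustrative names:
 *
 *	static int my_start_streaming(struct vb2_queue *q, unsigned int count)
 *	{
 *		int ret = my_hw_start(q->drv_priv);
 *
 *		if (ret)
 *			my_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
 *		return ret;
 *	}
 *
 * where my_return_all_buffers() would call
 * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) for each buffer the driver
 * still owns.
 */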
1650 
vb2_core_qbuf(struct vb2_queue * q,unsigned int index,void * pb,struct media_request * req)1651 int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
		  struct media_request *req)
{
	struct vb2_buffer *vb;
	enum vb2_buffer_state orig_state;
	int ret;

	if (q->error) {
		dprintk(q, 1, "fatal error occurred on queue\n");
		return -EIO;
	}

	vb = q->bufs[index];

	if (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
	    q->requires_requests) {
		dprintk(q, 1, "qbuf requires a request\n");
		return -EBADR;
	}

	if ((req && q->uses_qbuf) ||
	    (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
	     q->uses_requests)) {
		dprintk(q, 1, "queue in wrong mode (qbuf vs requests)\n");
		return -EBUSY;
	}

	if (req) {
		int ret;

		q->uses_requests = 1;
		if (vb->state != VB2_BUF_STATE_DEQUEUED) {
			dprintk(q, 1, "buffer %d not in dequeued state\n",
				vb->index);
			return -EINVAL;
		}

		if (q->is_output && !vb->prepared) {
			ret = call_vb_qop(vb, buf_out_validate, vb);
			if (ret) {
				dprintk(q, 1, "buffer validation failed\n");
				return ret;
			}
		}

		media_request_object_init(&vb->req_obj);

		/* Make sure the request is in a safe state for updating. */
		ret = media_request_lock_for_update(req);
		if (ret)
			return ret;
		ret = media_request_object_bind(req, &vb2_core_req_ops,
						q, true, &vb->req_obj);
		media_request_unlock_for_update(req);
		if (ret)
			return ret;

		vb->state = VB2_BUF_STATE_IN_REQUEST;

		/*
		 * Increment the refcount and store the request.
		 * The request refcount is decremented again when the
		 * buffer is dequeued. This is to prevent vb2_buffer_done()
		 * from freeing the request from interrupt context, which can
		 * happen if the application closed the request fd after
		 * queueing the request.
		 */
		media_request_get(req);
		vb->request = req;

		/* Fill buffer information for the userspace */
		if (pb) {
			call_void_bufop(q, copy_timestamp, vb, pb);
			call_void_bufop(q, fill_user_buffer, vb, pb);
		}

		dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
		return 0;
	}

	if (vb->state != VB2_BUF_STATE_IN_REQUEST)
		q->uses_qbuf = 1;

	switch (vb->state) {
	case VB2_BUF_STATE_DEQUEUED:
	case VB2_BUF_STATE_IN_REQUEST:
		if (!vb->prepared) {
			ret = __buf_prepare(vb);
			if (ret)
				return ret;
		}
		break;
	case VB2_BUF_STATE_PREPARING:
		dprintk(q, 1, "buffer still being prepared\n");
		return -EINVAL;
	default:
		dprintk(q, 1, "invalid buffer state %s\n",
			vb2_state_name(vb->state));
		return -EINVAL;
	}

	/*
	 * Add to the queued buffers list; a buffer will stay on it until
	 * dequeued in dqbuf.
	 */
	orig_state = vb->state;
	list_add_tail(&vb->queued_entry, &q->queued_list);
	q->queued_count++;
	q->waiting_for_buffers = false;
	vb->state = VB2_BUF_STATE_QUEUED;

	if (pb)
		call_void_bufop(q, copy_timestamp, vb, pb);

	trace_vb2_qbuf(q, vb);

	/*
	 * If already streaming, give the buffer to the driver for processing.
	 * If not, the buffer will be given to the driver on the next streamon.
	 */
	if (q->start_streaming_called)
		__enqueue_in_driver(vb);

	/* Fill buffer information for the userspace */
	if (pb)
		call_void_bufop(q, fill_user_buffer, vb, pb);

	/*
	 * If streamon has been called, and we haven't yet called
	 * start_streaming() since not enough buffers were queued, and
	 * we now have reached the minimum number of queued buffers,
	 * then we can finally call start_streaming().
	 */
	if (q->streaming && !q->start_streaming_called &&
	    q->queued_count >= q->min_buffers_needed) {
		ret = vb2_start_streaming(q);
		if (ret) {
			/*
			 * Since vb2_core_qbuf will return with an error,
			 * we should return the buffer to its original state,
			 * since the error indicates that it wasn't queued.
			 */
			list_del(&vb->queued_entry);
			q->queued_count--;
			vb->state = orig_state;
			return ret;
		}
	}

	dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_qbuf);
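
/*
 * Example (illustrative sketch, not part of the framework): in-kernel
 * callers such as the file I/O emulator below queue buffers without a
 * userspace buffer structure or a media request by passing NULL for both
 * 'pb' and 'req'. A hypothetical helper could look like:
 *
 *	static int example_queue_all(struct vb2_queue *q)
 *	{
 *		unsigned int i;
 *		int ret;
 *
 *		for (i = 0; i < q->num_buffers; i++) {
 *			ret = vb2_core_qbuf(q, i, NULL, NULL);
 *			if (ret)
 *				return ret;
 *		}
 *		return 0;
 *	}
 */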

/*
 * __vb2_wait_for_done_vb() - wait for a buffer to become available
 * for dequeuing
 *
 * Will sleep if required when nonblocking == false.
 */
static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
{
	/*
	 * All operations on vb_done_list are performed under done_lock
	 * spinlock protection. However, buffers may be removed from
	 * it and returned to userspace only while holding both driver's
	 * lock and the done_lock spinlock. Thus we can be sure that as
	 * long as we hold the driver's lock, the list will remain
	 * non-empty if the !list_empty() check succeeds.
	 */

	for (;;) {
		int ret;

		if (q->waiting_in_dqbuf) {
			dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
			return -EBUSY;
		}

		if (!q->streaming) {
			dprintk(q, 1, "streaming off, will not wait for buffers\n");
			return -EINVAL;
		}

		if (q->error) {
			dprintk(q, 1, "Queue in error state, will not wait for buffers\n");
			return -EIO;
		}

		if (q->last_buffer_dequeued) {
			dprintk(q, 3, "last buffer dequeued already, will not wait for buffers\n");
			return -EPIPE;
		}

		if (!list_empty(&q->done_list)) {
			/*
			 * Found a buffer that we were waiting for.
			 */
			break;
		}

		if (nonblocking) {
			dprintk(q, 3, "nonblocking and no buffers to dequeue, will not wait\n");
			return -EAGAIN;
		}

		q->waiting_in_dqbuf = 1;
		/*
		 * We are streaming and blocking, wait for another buffer to
		 * become ready or for streamoff. Driver's lock is released to
		 * allow streamoff or qbuf to be called while waiting.
		 */
		call_void_qop(q, wait_prepare, q);

		/*
		 * All locks have been released, it is safe to sleep now.
		 */
		dprintk(q, 3, "will sleep waiting for buffers\n");
		ret = wait_event_interruptible(q->done_wq,
				!list_empty(&q->done_list) || !q->streaming ||
				q->error);

		/*
		 * We need to reevaluate both conditions again after reacquiring
		 * the locks or return an error if one occurred.
		 */
		call_void_qop(q, wait_finish, q);
		q->waiting_in_dqbuf = 0;
		if (ret) {
			dprintk(q, 1, "sleep was interrupted\n");
			return ret;
		}
	}
	return 0;
}
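
/*
 * Note: wait_prepare() and wait_finish() are expected to drop and retake
 * the driver's main serialization lock around the sleep above. A minimal
 * sketch of what a driver-provided pair typically does (videobuf2-v4l2
 * ships ready-made vb2_ops_wait_prepare()/vb2_ops_wait_finish() helpers
 * with this behaviour, assuming q->lock is set; the names below are
 * hypothetical):
 *
 *	static void example_wait_prepare(struct vb2_queue *q)
 *	{
 *		mutex_unlock(q->lock);
 *	}
 *
 *	static void example_wait_finish(struct vb2_queue *q)
 *	{
 *		mutex_lock(q->lock);
 *	}
 */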

/*
 * __vb2_get_done_vb() - get a buffer ready for dequeuing
 *
 * Will sleep if required when nonblocking == false.
 */
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
			     void *pb, int nonblocking)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * Wait for at least one buffer to become available on the done_list.
	 */
	ret = __vb2_wait_for_done_vb(q, nonblocking);
	if (ret)
		return ret;

	/*
	 * Driver's lock has been held since we last verified that done_list
	 * is not empty, so no need for another list_empty(done_list) check.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
	/*
	 * Only remove the buffer from done_list if all planes can be
	 * handled. Some cases such as V4L2 file I/O and DVB have pb
	 * == NULL; skip the check then as there's nothing to verify.
	 */
	if (pb)
		ret = call_bufop(q, verify_planes_array, *vb, pb);
	if (!ret)
		list_del(&(*vb)->done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	return ret;
}

int vb2_wait_for_all_buffers(struct vb2_queue *q)
{
	if (!q->streaming) {
		dprintk(q, 1, "streaming off, will not wait for buffers\n");
		return -EINVAL;
	}

	if (q->start_streaming_called)
		wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
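
/*
 * Example (hedged sketch): vb2_wait_for_all_buffers() is intended to be
 * called with all locks taken, e.g. from a driver's stop_streaming()
 * callback, as a barrier for drivers that complete buffers
 * asynchronously (from an IRQ). The driver names below are hypothetical:
 *
 *	static void example_stop_streaming(struct vb2_queue *q)
 *	{
 *		struct example_dev *dev = vb2_get_drv_priv(q);
 *
 *		example_hw_stop(dev);
 *		vb2_wait_for_all_buffers(q);
 *	}
 */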

/*
 * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
 */
static void __vb2_dqbuf(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;

	/* nothing to do if the buffer is already dequeued */
	if (vb->state == VB2_BUF_STATE_DEQUEUED)
		return;

	vb->state = VB2_BUF_STATE_DEQUEUED;

	call_void_bufop(q, init_buffer, vb);
}

int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
		   bool nonblocking)
{
	struct vb2_buffer *vb = NULL;
	int ret;

	ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
	if (ret < 0)
		return ret;

	switch (vb->state) {
	case VB2_BUF_STATE_DONE:
		dprintk(q, 3, "returning done buffer\n");
		break;
	case VB2_BUF_STATE_ERROR:
		dprintk(q, 3, "returning done buffer with errors\n");
		break;
	default:
		dprintk(q, 1, "invalid buffer state %s\n",
			vb2_state_name(vb->state));
		return -EINVAL;
	}

	call_void_vb_qop(vb, buf_finish, vb);
	vb->prepared = 0;

	if (pindex)
		*pindex = vb->index;

	/* Fill buffer information for the userspace */
	if (pb)
		call_void_bufop(q, fill_user_buffer, vb, pb);

	/* Remove from vb2 queue */
	list_del(&vb->queued_entry);
	q->queued_count--;

	trace_vb2_dqbuf(q, vb);

	/* go back to dequeued state */
	__vb2_dqbuf(vb);

	if (WARN_ON(vb->req_obj.req)) {
		media_request_object_unbind(&vb->req_obj);
		media_request_object_put(&vb->req_obj);
	}
	if (vb->request)
		media_request_put(vb->request);
	vb->request = NULL;

	dprintk(q, 2, "dqbuf of buffer %d, state: %s\n",
		vb->index, vb2_state_name(vb->state));

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_dqbuf);
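
/*
 * Example (illustrative sketch): in-kernel users dequeue with pb == NULL
 * and receive the buffer index back through 'pindex', as the file I/O
 * emulator and vb2_thread() below do; process() is hypothetical:
 *
 *	unsigned int index;
 *	int ret = vb2_core_dqbuf(q, &index, NULL, nonblocking);
 *
 *	if (!ret)
 *		process(q->bufs[index]);
 */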

/*
 * __vb2_queue_cancel() - cancel and stop (pause) streaming
 *
 * Removes all queued buffers from driver's queue and all buffers queued by
 * userspace from vb2's queue. Returns to state after reqbufs.
 */
static void __vb2_queue_cancel(struct vb2_queue *q)
{
	unsigned int i;

	/*
	 * Tell driver to stop all transactions and release all queued
	 * buffers.
	 */
	if (q->start_streaming_called)
		call_void_qop(q, stop_streaming, q);

	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * in stop_streaming(). See the stop_streaming() documentation in
	 * videobuf2-core.h for more information on how buffers should be
	 * returned to vb2 in stop_streaming().
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		for (i = 0; i < q->num_buffers; ++i)
			if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
				pr_warn("driver bug: stop_streaming operation is leaving buf %p in active state\n",
					q->bufs[i]);
				vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
			}
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}

	q->streaming = 0;
	q->start_streaming_called = 0;
	q->queued_count = 0;
	q->error = 0;
	q->uses_requests = 0;
	q->uses_qbuf = 0;

	/*
	 * Remove all buffers from vb2's list...
	 */
	INIT_LIST_HEAD(&q->queued_list);
	/*
	 * ...and done list; userspace will not receive any buffers it
	 * has not already dequeued before initiating cancel.
	 */
	INIT_LIST_HEAD(&q->done_list);
	atomic_set(&q->owned_by_drv_count, 0);
	wake_up_all(&q->done_wq);

	/*
	 * Reinitialize all buffers for next use.
	 * Make sure to call buf_finish for any queued buffers. Normally
	 * that's done in dqbuf, but that's not going to happen when we
	 * cancel the whole queue. Note: this code belongs here, not in
	 * __vb2_dqbuf() since in vb2_core_dqbuf() there is a critical
	 * call to __fill_user_buffer() after buf_finish(). That order can't
	 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
	 */
	for (i = 0; i < q->num_buffers; ++i) {
		struct vb2_buffer *vb = q->bufs[i];
		struct media_request *req = vb->req_obj.req;

		/*
		 * If a request is associated with this buffer, then
		 * call buf_request_complete() to give the driver a chance
		 * to complete() related request objects. Otherwise those
		 * objects would never complete.
		 */
		if (req) {
			enum media_request_state state;
			unsigned long flags;

			spin_lock_irqsave(&req->lock, flags);
			state = req->state;
			spin_unlock_irqrestore(&req->lock, flags);

			if (state == MEDIA_REQUEST_STATE_QUEUED)
				call_void_vb_qop(vb, buf_request_complete, vb);
		}

		__vb2_buf_mem_finish(vb);

		if (vb->prepared) {
			call_void_vb_qop(vb, buf_finish, vb);
			vb->prepared = 0;
		}
		__vb2_dqbuf(vb);

		if (vb->req_obj.req) {
			media_request_object_unbind(&vb->req_obj);
			media_request_object_put(&vb->req_obj);
		}
		if (vb->request)
			media_request_put(vb->request);
		vb->request = NULL;
		vb->copied_timestamp = 0;
	}
}

int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
{
	int ret;

	if (type != q->type) {
		dprintk(q, 1, "invalid stream type\n");
		return -EINVAL;
	}

	if (q->streaming) {
		dprintk(q, 3, "already streaming\n");
		return 0;
	}

	if (!q->num_buffers) {
		dprintk(q, 1, "no buffers have been allocated\n");
		return -EINVAL;
	}

	if (q->num_buffers < q->min_buffers_needed) {
		dprintk(q, 1, "need at least %u allocated buffers\n",
				q->min_buffers_needed);
		return -EINVAL;
	}

	/*
	 * Tell driver to start streaming provided sufficient buffers
	 * are available.
	 */
	if (q->queued_count >= q->min_buffers_needed) {
		ret = v4l_vb2q_enable_media_source(q);
		if (ret)
			return ret;
		ret = vb2_start_streaming(q);
		if (ret)
			return ret;
	}

	q->streaming = 1;

	dprintk(q, 3, "successful\n");
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_streamon);
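
/*
 * Example (hedged sketch): the usual start order as seen from this API,
 * assuming buffers were already allocated via vb2_core_reqbufs(). The
 * driver's start_streaming() fires either here or from a later
 * vb2_core_qbuf(), whichever first satisfies q->min_buffers_needed:
 *
 *	for (i = 0; i < q->num_buffers; i++)
 *		vb2_core_qbuf(q, i, NULL, NULL);
 *	ret = vb2_core_streamon(q, q->type);
 */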

void vb2_queue_error(struct vb2_queue *q)
{
	q->error = 1;

	wake_up_all(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_queue_error);
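
/*
 * Example (hedged sketch): a driver would typically call this from its
 * error path when the hardware has died and no further buffers will
 * ever complete, so that waiters in dqbuf/poll are released with -EIO.
 * All names below are hypothetical:
 *
 *	static irqreturn_t example_irq(int irq, void *priv)
 *	{
 *		struct example_dev *dev = priv;
 *
 *		if (example_hw_is_dead(dev))
 *			vb2_queue_error(&dev->queue);
 *		return IRQ_HANDLED;
 *	}
 */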

int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
{
	if (type != q->type) {
		dprintk(q, 1, "invalid stream type\n");
		return -EINVAL;
	}

	/*
	 * Cancel will pause streaming and remove all buffers from the driver
	 * and vb2, effectively returning control over them to userspace.
	 *
	 * Note that we do this even if q->streaming == 0: if you prepare or
	 * queue buffers, and then call streamoff without ever having called
	 * streamon, you would still expect those buffers to be returned to
	 * their normal dequeued state.
	 */
	__vb2_queue_cancel(q);
	q->waiting_for_buffers = !q->is_output;
	q->last_buffer_dequeued = false;

	dprintk(q, 3, "successful\n");
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_streamoff);

/*
 * __find_plane_by_offset() - find plane associated with the given offset off
 */
static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
			unsigned int *_buffer, unsigned int *_plane)
{
	struct vb2_buffer *vb;
	unsigned int buffer, plane;

	/*
	 * Sanity checks to ensure the lock is held, MEMORY_MMAP is
	 * used and fileio isn't active.
	 */
	lockdep_assert_held(&q->mmap_lock);

	if (q->memory != VB2_MEMORY_MMAP) {
		dprintk(q, 1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	/*
	 * Go over all buffers and their planes, comparing the given offset
	 * with an offset assigned to each plane. If a match is found,
	 * return its buffer and plane numbers.
	 */
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		vb = q->bufs[buffer];

		for (plane = 0; plane < vb->num_planes; ++plane) {
			if (vb->planes[plane].m.offset == off) {
				*_buffer = buffer;
				*_plane = plane;
				return 0;
			}
		}
	}

	return -EINVAL;
}

int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
		unsigned int index, unsigned int plane, unsigned int flags)
{
	struct vb2_buffer *vb = NULL;
	struct vb2_plane *vb_plane;
	int ret;
	struct dma_buf *dbuf;

	if (q->memory != VB2_MEMORY_MMAP) {
		dprintk(q, 1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	if (!q->mem_ops->get_dmabuf) {
		dprintk(q, 1, "queue does not support DMA buffer exporting\n");
		return -EINVAL;
	}

	if (flags & ~(O_CLOEXEC | O_ACCMODE)) {
		dprintk(q, 1, "queue supports only O_CLOEXEC and access mode flags\n");
		return -EINVAL;
	}

	if (type != q->type) {
		dprintk(q, 1, "invalid buffer type\n");
		return -EINVAL;
	}

	if (index >= q->num_buffers) {
		dprintk(q, 1, "buffer index out of range\n");
		return -EINVAL;
	}

	vb = q->bufs[index];

	if (plane >= vb->num_planes) {
		dprintk(q, 1, "buffer plane out of range\n");
		return -EINVAL;
	}

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "expbuf: file io in progress\n");
		return -EBUSY;
	}

	vb_plane = &vb->planes[plane];

	dbuf = call_ptr_memop(get_dmabuf,
			      vb,
			      vb_plane->mem_priv,
			      flags & O_ACCMODE);
	if (IS_ERR_OR_NULL(dbuf)) {
		dprintk(q, 1, "failed to export buffer %d, plane %d\n",
			index, plane);
		return -EINVAL;
	}

	ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE);
	if (ret < 0) {
		dprintk(q, 3, "buffer %d, plane %d failed to export (%d)\n",
			index, plane, ret);
		dma_buf_put(dbuf);
		return ret;
	}

	dprintk(q, 3, "buffer %d, plane %d exported as %d descriptor\n",
		index, plane, ret);
	*fd = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_expbuf);
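
/*
 * Example (hedged sketch): from userspace this is reached through the
 * V4L2 VIDIOC_EXPBUF ioctl, which hands back a DMABUF fd for one plane
 * of an MMAP buffer; use_dmabuf_fd() is hypothetical:
 *
 *	struct v4l2_exportbuffer expbuf = {
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.index = 0,
 *		.plane = 0,
 *		.flags = O_CLOEXEC | O_RDWR,
 *	};
 *
 *	if (ioctl(video_fd, VIDIOC_EXPBUF, &expbuf) == 0)
 *		use_dmabuf_fd(expbuf.fd);
 */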

int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
{
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int buffer = 0, plane = 0;
	int ret;
	unsigned long length;

	/*
	 * Check memory area access mode.
	 */
	if (!(vma->vm_flags & VM_SHARED)) {
		dprintk(q, 1, "invalid vma flags, VM_SHARED needed\n");
		return -EINVAL;
	}
	if (q->is_output) {
		if (!(vma->vm_flags & VM_WRITE)) {
			dprintk(q, 1, "invalid vma flags, VM_WRITE needed\n");
			return -EINVAL;
		}
	} else {
		if (!(vma->vm_flags & VM_READ)) {
			dprintk(q, 1, "invalid vma flags, VM_READ needed\n");
			return -EINVAL;
		}
	}

	mutex_lock(&q->mmap_lock);

	/*
	 * Find the plane corresponding to the offset passed by userspace. This
	 * will return an error if not MEMORY_MMAP or file I/O is in progress.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		goto unlock;

	vb = q->bufs[buffer];

	/*
	 * MMAP requires page_aligned buffers.
	 * The buffer length was page_aligned at __vb2_buf_mem_alloc(),
	 * so we need to do the same here.
	 */
	length = PAGE_ALIGN(vb->planes[plane].length);
	if (length < (vma->vm_end - vma->vm_start)) {
		dprintk(q, 1,
			"MMAP invalid, as it would overflow buffer length\n");
		ret = -EINVAL;
		goto unlock;
	}

	/*
	 * vm_pgoff is treated in V4L2 API as a 'cookie' to select a buffer,
	 * not as an in-buffer offset. We always want to mmap a whole buffer
	 * from its beginning.
	 */
	vma->vm_pgoff = 0;

	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);

unlock:
	mutex_unlock(&q->mmap_lock);
	if (ret)
		return ret;

	dprintk(q, 3, "buffer %d, plane %d successfully mapped\n", buffer, plane);
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_mmap);
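
/*
 * Example (hedged sketch): userspace obtains the per-plane offset cookie
 * from VIDIOC_QUERYBUF and passes it back as the mmap() offset; vb2 then
 * resolves it through __find_plane_by_offset() above:
 *
 *	struct v4l2_buffer buf = {
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP,
 *		.index = 0,
 *	};
 *
 *	ioctl(video_fd, VIDIOC_QUERYBUF, &buf);
 *	void *mem = mmap(NULL, buf.length, PROT_READ, MAP_SHARED,
 *			 video_fd, buf.m.offset);
 */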

#ifndef CONFIG_MMU
unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
				    unsigned long addr,
				    unsigned long len,
				    unsigned long pgoff,
				    unsigned long flags)
{
	unsigned long off = pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int buffer, plane;
	void *vaddr;
	int ret;

	mutex_lock(&q->mmap_lock);

	/*
	 * Find the plane corresponding to the offset passed by userspace. This
	 * will return an error if not MEMORY_MMAP or file I/O is in progress.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		goto unlock;

	vb = q->bufs[buffer];

	vaddr = vb2_plane_vaddr(vb, plane);
	mutex_unlock(&q->mmap_lock);
	return vaddr ? (unsigned long)vaddr : -EINVAL;

unlock:
	mutex_unlock(&q->mmap_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
#endif

int vb2_core_queue_init(struct vb2_queue *q)
{
	/*
	 * Sanity check
	 */
	if (WARN_ON(!q)			  ||
	    WARN_ON(!q->ops)		  ||
	    WARN_ON(!q->mem_ops)	  ||
	    WARN_ON(!q->type)		  ||
	    WARN_ON(!q->io_modes)	  ||
	    WARN_ON(!q->ops->queue_setup) ||
	    WARN_ON(!q->ops->buf_queue))
		return -EINVAL;

	if (WARN_ON(q->requires_requests && !q->supports_requests))
		return -EINVAL;

	/*
	 * This combination is not allowed since a non-zero value of
	 * q->min_buffers_needed can cause vb2_core_qbuf() to fail if
	 * it has to call start_streaming(), and the Request API expects
	 * that queueing a request (and thus queueing a buffer contained
	 * in that request) will always succeed. There is no method of
	 * propagating an error back to userspace.
	 */
	if (WARN_ON(q->supports_requests && q->min_buffers_needed))
		return -EINVAL;

	INIT_LIST_HEAD(&q->queued_list);
	INIT_LIST_HEAD(&q->done_list);
	spin_lock_init(&q->done_lock);
	mutex_init(&q->mmap_lock);
	init_waitqueue_head(&q->done_wq);

	q->memory = VB2_MEMORY_UNKNOWN;

	if (q->buf_struct_size == 0)
		q->buf_struct_size = sizeof(struct vb2_buffer);

	if (q->bidirectional)
		q->dma_dir = DMA_BIDIRECTIONAL;
	else
		q->dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	if (q->name[0] == '\0')
		snprintf(q->name, sizeof(q->name), "%s-%p",
			 q->is_output ? "out" : "cap", q);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_queue_init);
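
/*
 * Example (hedged sketch): the fields checked above are the minimum a
 * driver must fill in before calling vb2_core_queue_init() (normally via
 * a wrapper such as vb2_queue_init() in videobuf2-v4l2). example_vb2_ops
 * is hypothetical and must at least provide queue_setup() and
 * buf_queue(); vb2_dma_contig_memops is one of the stock allocators:
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_DMABUF;
 *	q->ops = &example_vb2_ops;
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	ret = vb2_core_queue_init(q);
 */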

static int __vb2_init_fileio(struct vb2_queue *q, int read);
static int __vb2_cleanup_fileio(struct vb2_queue *q);

void vb2_core_queue_release(struct vb2_queue *q)
{
	__vb2_cleanup_fileio(q);
	__vb2_queue_cancel(q);
	mutex_lock(&q->mmap_lock);
	__vb2_queue_free(q, q->num_buffers);
	mutex_unlock(&q->mmap_lock);
}
EXPORT_SYMBOL_GPL(vb2_core_queue_release);

__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
		poll_table *wait)
{
	__poll_t req_events = poll_requested_events(wait);
	struct vb2_buffer *vb = NULL;
	unsigned long flags;

	/*
	 * poll_wait() MUST be called on the first invocation on all the
	 * potential queues of interest, even if we are not interested in
	 * their events during this first call. Failure to do so will result
	 * in the queue's events being ignored because the poll_table won't
	 * be capable of adding new wait queues thereafter.
	 */
	poll_wait(file, &q->done_wq, wait);

	if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM)))
		return 0;
	if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM)))
		return 0;

	/*
	 * Start file I/O emulator only if streaming API has not been used yet.
	 */
	if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
		if (!q->is_output && (q->io_modes & VB2_READ) &&
				(req_events & (EPOLLIN | EPOLLRDNORM))) {
			if (__vb2_init_fileio(q, 1))
				return EPOLLERR;
		}
		if (q->is_output && (q->io_modes & VB2_WRITE) &&
				(req_events & (EPOLLOUT | EPOLLWRNORM))) {
			if (__vb2_init_fileio(q, 0))
				return EPOLLERR;
			/*
			 * Write to OUTPUT queue can be done immediately.
			 */
			return EPOLLOUT | EPOLLWRNORM;
		}
	}

	/*
	 * There is nothing to wait for if the queue isn't streaming, or if the
	 * error flag is set.
	 */
	if (!vb2_is_streaming(q) || q->error)
		return EPOLLERR;

	/*
	 * If this quirk is set and QBUF hasn't been called yet then
	 * return EPOLLERR as well. This only affects capture queues, output
	 * queues will always initialize waiting_for_buffers to false.
	 * This quirk is set by V4L2 for backwards compatibility reasons.
	 */
	if (q->quirk_poll_must_check_waiting_for_buffers &&
	    q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM)))
		return EPOLLERR;

	/*
	 * For output streams you can call write() as long as there are fewer
	 * buffers queued than there are buffers available.
	 */
	if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
		return EPOLLOUT | EPOLLWRNORM;

	if (list_empty(&q->done_list)) {
		/*
		 * If the last buffer was dequeued from a capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (q->last_buffer_dequeued)
			return EPOLLIN | EPOLLRDNORM;
	}

	/*
	 * Take first buffer available for dequeuing.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	if (!list_empty(&q->done_list))
		vb = list_first_entry(&q->done_list, struct vb2_buffer,
					done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	if (vb && (vb->state == VB2_BUF_STATE_DONE
			|| vb->state == VB2_BUF_STATE_ERROR)) {
		return (q->is_output) ?
				EPOLLOUT | EPOLLWRNORM :
				EPOLLIN | EPOLLRDNORM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_poll);
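
/*
 * Example (hedged sketch): a userspace capture loop pairing poll() with
 * VIDIOC_DQBUF; EPOLLIN | EPOLLRDNORM is what the code above returns
 * once a buffer sits on the done list:
 *
 *	struct pollfd pfd = { .fd = video_fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLIN)
 *			ioctl(video_fd, VIDIOC_DQBUF, &buf);
 *	}
 */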

/*
 * struct vb2_fileio_buf - buffer context used by file io emulator
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API. This structure is used for
 * tracking context related to the buffers.
 */
struct vb2_fileio_buf {
	void *vaddr;
	unsigned int size;
	unsigned int pos;
	unsigned int queued:1;
};

/*
 * struct vb2_fileio_data - queue context used by file io emulator
 *
 * @cur_index:	the index of the buffer currently being read from or
 *		written to. If equal to q->num_buffers then a new buffer
 *		must be dequeued.
 * @initial_index: in the read() case all buffers are queued up immediately
 *		in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
 *		buffers. However, in the write() case no buffers are initially
 *		queued, instead whenever a buffer is full it is queued up by
 *		__vb2_perform_fileio(). Only once all available buffers have
 *		been queued up will __vb2_perform_fileio() start to dequeue
 *		buffers. This means that initially __vb2_perform_fileio()
 *		needs to know what buffer index to use when it is queuing up
 *		the buffers for the first time. That initial index is stored
 *		in this field. Once it is equal to q->num_buffers all
 *		available buffers have been queued and __vb2_perform_fileio()
 *		should start the normal dequeue/queue cycle.
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API. For proper operation it requires
 * this structure to save the driver state between each call of the read
 * or write function.
 */
struct vb2_fileio_data {
	unsigned int count;
	unsigned int type;
	unsigned int memory;
	struct vb2_fileio_buf bufs[VB2_MAX_FRAME];
	unsigned int cur_index;
	unsigned int initial_index;
	unsigned int q_count;
	unsigned int dq_count;
	unsigned read_once:1;
	unsigned write_immediately:1;
};

/*
 * __vb2_init_fileio() - initialize file io emulator
 * @q:		videobuf2 queue
 * @read:	mode selector (1 means read, 0 means write)
 */
static int __vb2_init_fileio(struct vb2_queue *q, int read)
{
	struct vb2_fileio_data *fileio;
	int i, ret;
	unsigned int count = 0;

	/*
	 * Sanity check
	 */
	if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
		    (!read && !(q->io_modes & VB2_WRITE))))
		return -EINVAL;

	/*
	 * Check if device supports mapping buffers to kernel virtual space.
	 */
	if (!q->mem_ops->vaddr)
		return -EBUSY;

	/*
	 * Check if streaming api has not already been activated.
	 */
	if (q->streaming || q->num_buffers > 0)
		return -EBUSY;

	/*
	 * Start with count 1, driver can increase it in queue_setup()
	 */
	count = 1;

	dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
		(read) ? "read" : "write", count, q->fileio_read_once,
		q->fileio_write_immediately);

	fileio = kzalloc(sizeof(*fileio), GFP_KERNEL);
	if (fileio == NULL)
		return -ENOMEM;

	fileio->read_once = q->fileio_read_once;
	fileio->write_immediately = q->fileio_write_immediately;

	/*
	 * Request buffers and use MMAP type to force driver
	 * to allocate buffers by itself.
	 */
	fileio->count = count;
	fileio->memory = VB2_MEMORY_MMAP;
	fileio->type = q->type;
	q->fileio = fileio;
	ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
	if (ret)
		goto err_kfree;

	/*
	 * Check if plane_count is correct
	 * (multiplane buffers are not supported).
	 */
	if (q->bufs[0]->num_planes != 1) {
		ret = -EBUSY;
		goto err_reqbufs;
	}

	/*
	 * Get kernel address of each buffer.
	 */
	for (i = 0; i < q->num_buffers; i++) {
		fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
		if (fileio->bufs[i].vaddr == NULL) {
			ret = -EINVAL;
			goto err_reqbufs;
		}
		fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
	}

	/*
	 * Read mode requires pre-queuing of all buffers.
	 */
	if (read) {
		/*
		 * Queue all buffers.
		 */
		for (i = 0; i < q->num_buffers; i++) {
			ret = vb2_core_qbuf(q, i, NULL, NULL);
			if (ret)
				goto err_reqbufs;
			fileio->bufs[i].queued = 1;
		}
		/*
		 * All buffers have been queued, so mark that by setting
		 * initial_index to q->num_buffers
		 */
		fileio->initial_index = q->num_buffers;
		fileio->cur_index = q->num_buffers;
	}

	/*
	 * Start streaming.
	 */
	ret = vb2_core_streamon(q, q->type);
	if (ret)
		goto err_reqbufs;

	return ret;

err_reqbufs:
	fileio->count = 0;
	vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);

err_kfree:
	q->fileio = NULL;
	kfree(fileio);
	return ret;
}

/*
 * __vb2_cleanup_fileio() - free resources used by file io emulator
 * @q:		videobuf2 queue
 */
static int __vb2_cleanup_fileio(struct vb2_queue *q)
{
	struct vb2_fileio_data *fileio = q->fileio;

	if (fileio) {
		vb2_core_streamoff(q, q->type);
		q->fileio = NULL;
		fileio->count = 0;
		vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
		kfree(fileio);
		dprintk(q, 3, "file io emulator closed\n");
	}
	return 0;
}

/*
 * __vb2_perform_fileio() - perform a single file io (read or write) operation
 * @q:		videobuf2 queue
 * @data:	pointer to target userspace buffer
 * @count:	number of bytes to read or write
 * @ppos:	file handle position tracking pointer
 * @nonblock:	mode selector (1 means nonblocking calls, 0 means blocking)
 * @read:	access mode selector (1 means read, 0 means write)
 */
static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblock, int read)
{
	struct vb2_fileio_data *fileio;
	struct vb2_fileio_buf *buf;
	bool is_multiplanar = q->is_multiplanar;
	/*
	 * When using write() to write data to an output video node the vb2 core
	 * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
	 * else is able to provide this information with the write() operation.
	 */
	bool copy_timestamp = !read && q->copy_timestamp;
	unsigned index;
	int ret;

	dprintk(q, 3, "mode %s, offset %ld, count %zd, %sblocking\n",
		read ? "read" : "write", (long)*ppos, count,
		nonblock ? "non" : "");

	if (!data)
		return -EINVAL;

	if (q->waiting_in_dqbuf) {
		dprintk(q, 3, "another dup()ped fd is %s\n",
			read ? "reading" : "writing");
		return -EBUSY;
	}

	/*
	 * Initialize emulator on first call.
	 */
	if (!vb2_fileio_is_active(q)) {
		ret = __vb2_init_fileio(q, read);
		dprintk(q, 3, "vb2_init_fileio result: %d\n", ret);
		if (ret)
			return ret;
	}
	fileio = q->fileio;

	/*
	 * Check if we need to dequeue the buffer.
	 */
	index = fileio->cur_index;
	if (index >= q->num_buffers) {
		struct vb2_buffer *b;

		/*
		 * Call vb2_dqbuf to get buffer back.
		 */
		ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
		dprintk(q, 5, "vb2_dqbuf result: %d\n", ret);
		if (ret)
			return ret;
		fileio->dq_count += 1;

		fileio->cur_index = index;
		buf = &fileio->bufs[index];
		b = q->bufs[index];

		/*
		 * Get number of bytes filled by the driver
		 */
		buf->pos = 0;
		buf->queued = 0;
		buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
				 : vb2_plane_size(q->bufs[index], 0);
		/* Compensate for data_offset on read in the multiplanar case. */
		if (is_multiplanar && read &&
				b->planes[0].data_offset < buf->size) {
			buf->pos = b->planes[0].data_offset;
			buf->size -= buf->pos;
		}
	} else {
		buf = &fileio->bufs[index];
	}

	/*
	 * Limit count on last few bytes of the buffer.
	 */
	if (buf->pos + count > buf->size) {
		count = buf->size - buf->pos;
		dprintk(q, 5, "reducing read count: %zd\n", count);
	}

	/*
	 * Transfer data to userspace.
	 */
	dprintk(q, 3, "copying %zd bytes - buffer %d, offset %u\n",
		count, index, buf->pos);
	if (read)
		ret = copy_to_user(data, buf->vaddr + buf->pos, count);
	else
		ret = copy_from_user(buf->vaddr + buf->pos, data, count);
	if (ret) {
		dprintk(q, 3, "error copying data\n");
		return -EFAULT;
	}

	/*
	 * Update counters.
	 */
	buf->pos += count;
	*ppos += count;

	/*
	 * Queue next buffer if required.
	 */
	if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
		struct vb2_buffer *b = q->bufs[index];

		/*
		 * Check if this is the last buffer to read.
		 */
		if (read && fileio->read_once && fileio->dq_count == 1) {
			dprintk(q, 3, "read limit reached\n");
			return __vb2_cleanup_fileio(q);
		}

		/*
		 * Call vb2_qbuf and give buffer to the driver.
		 */
		b->planes[0].bytesused = buf->pos;

		if (copy_timestamp)
			b->timestamp = ktime_get_ns();
		ret = vb2_core_qbuf(q, index, NULL, NULL);
		dprintk(q, 5, "vb2_qbuf result: %d\n", ret);
		if (ret)
			return ret;

		/*
		 * Buffer has been queued, update the status
		 */
		buf->pos = 0;
		buf->queued = 1;
		buf->size = vb2_plane_size(q->bufs[index], 0);
		fileio->q_count += 1;
		/*
		 * If we are queuing up buffers for the first time, then
		 * increase initial_index by one.
		 */
		if (fileio->initial_index < q->num_buffers)
			fileio->initial_index++;
		/*
		 * The next buffer to use is either a buffer that's going to be
		 * queued for the first time (initial_index < q->num_buffers)
		 * or it is equal to q->num_buffers, meaning that the next
		 * time we need to dequeue a buffer since we've now queued up
		 * all the 'first time' buffers.
		 */
		fileio->cur_index = fileio->initial_index;
	}

	/*
	 * Return proper number of bytes processed.
	 */
	if (ret == 0)
		ret = count;
	return ret;
}

size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
}
EXPORT_SYMBOL_GPL(vb2_read);

size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
		loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, (char __user *) data, count,
							ppos, nonblocking, 0);
}
EXPORT_SYMBOL_GPL(vb2_write);
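
/*
 * Example (hedged sketch): with the emulator in place, a capture node
 * supporting VB2_READ can be consumed with plain file I/O, no ioctls
 * required; the first read() triggers __vb2_init_fileio() via the
 * driver's read file operation. FRAME_SIZE is hypothetical:
 *
 *	int fd = open("/dev/video0", O_RDONLY);
 *	char frame[FRAME_SIZE];
 *	ssize_t n = read(fd, frame, sizeof(frame));
 */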

struct vb2_threadio_data {
	struct task_struct *thread;
	vb2_thread_fnc fnc;
	void *priv;
	bool stop;
};

static int vb2_thread(void *data)
{
	struct vb2_queue *q = data;
	struct vb2_threadio_data *threadio = q->threadio;
	bool copy_timestamp = false;
	unsigned prequeue = 0;
	unsigned index = 0;
	int ret = 0;

	if (q->is_output) {
		prequeue = q->num_buffers;
		copy_timestamp = q->copy_timestamp;
	}

	set_freezable();

	for (;;) {
		struct vb2_buffer *vb;

		/*
		 * Call vb2_dqbuf to get buffer back.
		 */
		if (prequeue) {
			vb = q->bufs[index++];
			prequeue--;
		} else {
			call_void_qop(q, wait_finish, q);
			if (!threadio->stop)
				ret = vb2_core_dqbuf(q, &index, NULL, 0);
			call_void_qop(q, wait_prepare, q);
			dprintk(q, 5, "file io: vb2_dqbuf result: %d\n", ret);
			if (!ret)
				vb = q->bufs[index];
		}
		if (ret || threadio->stop)
			break;
		try_to_freeze();

		if (vb->state != VB2_BUF_STATE_ERROR)
			if (threadio->fnc(vb, threadio->priv))
				break;
		call_void_qop(q, wait_finish, q);
		if (copy_timestamp)
			vb->timestamp = ktime_get_ns();
		if (!threadio->stop)
			ret = vb2_core_qbuf(q, vb->index, NULL, NULL);
		call_void_qop(q, wait_prepare, q);
		if (ret || threadio->stop)
			break;
	}

	/* Hmm, linux becomes *very* unhappy without this ... */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}

/*
 * This function should not be used for anything else but the videobuf2-dvb
 * support. If you think you have another good use-case for this, then please
 * contact the linux-media mailing list first.
 */
int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
		     const char *thread_name)
{
	struct vb2_threadio_data *threadio;
	int ret = 0;

	if (q->threadio)
		return -EBUSY;
	if (vb2_is_busy(q))
		return -EBUSY;
	if (WARN_ON(q->fileio))
		return -EBUSY;

	threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
	if (threadio == NULL)
		return -ENOMEM;
	threadio->fnc = fnc;
	threadio->priv = priv;

	ret = __vb2_init_fileio(q, !q->is_output);
	dprintk(q, 3, "file io: vb2_init_fileio result: %d\n", ret);
	if (ret)
		goto nomem;
	q->threadio = threadio;
	threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
	if (IS_ERR(threadio->thread)) {
		ret = PTR_ERR(threadio->thread);
		threadio->thread = NULL;
		goto nothread;
	}
	return 0;

nothread:
	__vb2_cleanup_fileio(q);
nomem:
	kfree(threadio);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_thread_start);
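
/*
 * Example (hedged sketch): videobuf2-dvb is the intended user; it starts
 * a thread whose callback pushes each completed buffer onward. The
 * callback and helper names below are hypothetical:
 *
 *	static int example_fill_dvb(struct vb2_buffer *vb, void *priv)
 *	{
 *		struct example_dvb *dvb = priv;
 *
 *		example_feed_demux(dvb, vb2_plane_vaddr(vb, 0),
 *				   vb2_get_plane_payload(vb, 0));
 *		return 0;
 *	}
 *
 *	ret = vb2_thread_start(q, example_fill_dvb, dvb, "dvb");
 */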

int vb2_thread_stop(struct vb2_queue *q)
{
	struct vb2_threadio_data *threadio = q->threadio;
	int err;

	if (threadio == NULL)
		return 0;
	threadio->stop = true;
	/* Wake up all pending sleeps in the thread */
	vb2_queue_error(q);
	err = kthread_stop(threadio->thread);
	__vb2_cleanup_fileio(q);
	threadio->thread = NULL;
	kfree(threadio);
	q->threadio = NULL;
	return err;
}
EXPORT_SYMBOL_GPL(vb2_thread_stop);

MODULE_DESCRIPTION("Media buffer core framework");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);