// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-vmalloc.h>
#include "vpu.h"
#include "vpu_core.h"
#include "vpu_v4l2.h"
#include "vpu_msgs.h"
#include "vpu_helpers.h"

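/*
 * All per-instance state is serialized by inst->lock; these helpers are
 * the single lock/unlock entry points used throughout the driver.
 */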
void vpu_inst_lock(struct vpu_inst *inst)
{
	mutex_lock(&inst->lock);
}

void vpu_inst_unlock(struct vpu_inst *inst)
{
	mutex_unlock(&inst->lock);
}

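/*
 * Per-plane accessors: return the DMA address and the usable payload
 * length of a plane, both adjusted by the plane's data_offset. An
 * out-of-range plane index yields 0.
 */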
dma_addr_t vpu_get_vb_phy_addr(struct vb2_buffer *vb, u32 plane_no)
{
	if (plane_no >= vb->num_planes)
		return 0;
	return vb2_dma_contig_plane_dma_addr(vb, plane_no) +
			vb->planes[plane_no].data_offset;
}

unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no)
{
	if (plane_no >= vb->num_planes)
		return 0;
	return vb2_plane_size(vb, plane_no) - vb->planes[plane_no].data_offset;
}

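/*
 * Track the driver-private buffer state (VPU_BUF_STATE_*) kept in the
 * vpu_vb2_buffer wrapper around each vb2 buffer.
 */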
void vpu_set_buffer_state(struct vb2_v4l2_buffer *vbuf, unsigned int state)
{
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);

	vpu_buf->state = state;
}

unsigned int vpu_get_buffer_state(struct vb2_v4l2_buffer *vbuf)
{
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);

	return vpu_buf->state;
}

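/*
 * Put both m2m queues into the error state so that pending and future
 * buffer operations fail and userspace is woken up.
 */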
void vpu_v4l2_set_error(struct vpu_inst *inst)
{
	vpu_inst_lock(inst);
	dev_err(inst->dev, "some error occurred in codec\n");
	if (inst->fh.m2m_ctx) {
		vb2_queue_error(v4l2_m2m_get_src_vq(inst->fh.m2m_ctx));
		vb2_queue_error(v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx));
	}
	vpu_inst_unlock(inst);
}

int vpu_notify_eos(struct vpu_inst *inst)
{
	static const struct v4l2_event ev = {
		.id = 0,
		.type = V4L2_EVENT_EOS
	};

	vpu_trace(inst->dev, "[%d]\n", inst->id);
	v4l2_event_queue_fh(&inst->fh, &ev);

	return 0;
}

int vpu_notify_source_change(struct vpu_inst *inst)
{
	static const struct v4l2_event ev = {
		.id = 0,
		.type = V4L2_EVENT_SOURCE_CHANGE,
		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION
	};

	vpu_trace(inst->dev, "[%d]\n", inst->id);
	v4l2_event_queue_fh(&inst->fh, &ev);
	return 0;
}

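/*
 * Mark the CAPTURE queue as drained: only once its done list is empty,
 * set last_buffer_dequeued, wake any DQBUF waiters and raise the EOS
 * event.
 */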
int vpu_set_last_buffer_dequeued(struct vpu_inst *inst)
{
	struct vb2_queue *q;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
	if (!list_empty(&q->done_list))
		return -EINVAL;

	if (q->last_buffer_dequeued)
		return 0;
	vpu_trace(inst->dev, "last buffer dequeued\n");
	q->last_buffer_dequeued = true;
	wake_up(&q->done_wq);
	vpu_notify_eos(inst);
	return 0;
}

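/* Return true if no queued OUTPUT buffer is still waiting to be processed. */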
bool vpu_is_source_empty(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;

	if (!inst->fh.m2m_ctx)
		return true;
	v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
		if (vpu_get_buffer_state(&buf->vb) == VPU_BUF_STATE_IDLE)
			return false;
	}
	return true;
}

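/*
 * Common TRY_FMT handling: fall back to the first supported format if the
 * requested pixelformat is unknown, clamp width/height to valid ranges and
 * derive per-plane bytesperline/sizeimage, honouring any larger values
 * requested by userspace.
 */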
const struct vpu_format *vpu_try_fmt_common(struct vpu_inst *inst, struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
	u32 type = f->type;
	u32 stride = 1;
	u32 bytesperline;
	u32 sizeimage;
	const struct vpu_format *fmt;
	const struct vpu_core_resources *res;
	int i;

	fmt = vpu_helper_find_format(inst, type, pixmp->pixelformat);
	if (!fmt) {
		fmt = vpu_helper_enum_format(inst, type, 0);
		if (!fmt)
			return NULL;
		pixmp->pixelformat = fmt->pixfmt;
	}

	res = vpu_get_resource(inst);
	if (res)
		stride = res->stride;
	if (pixmp->width)
		pixmp->width = vpu_helper_valid_frame_width(inst, pixmp->width);
	if (pixmp->height)
		pixmp->height = vpu_helper_valid_frame_height(inst, pixmp->height);
	pixmp->flags = fmt->flags;
	pixmp->num_planes = fmt->num_planes;
	if (pixmp->field == V4L2_FIELD_ANY)
		pixmp->field = V4L2_FIELD_NONE;
	for (i = 0; i < pixmp->num_planes; i++) {
		bytesperline = max_t(s32, pixmp->plane_fmt[i].bytesperline, 0);
		sizeimage = vpu_helper_get_plane_size(pixmp->pixelformat,
						      pixmp->width,
						      pixmp->height,
						      i,
						      stride,
						      pixmp->field > V4L2_FIELD_NONE ? 1 : 0,
						      &bytesperline);
		sizeimage = max_t(s32, pixmp->plane_fmt[i].sizeimage, sizeimage);
		pixmp->plane_fmt[i].bytesperline = bytesperline;
		pixmp->plane_fmt[i].sizeimage = sizeimage;
	}

	return fmt;
}

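/*
 * Processing requires a registered instance (valid id, state past DEINIT);
 * the backend's optional check_ready op has the final say.
 */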
static bool vpu_check_ready(struct vpu_inst *inst, u32 type)
{
	if (!inst)
		return false;
	if (inst->state == VPU_CODEC_STATE_DEINIT || inst->id < 0)
		return false;
	if (!inst->ops->check_ready)
		return true;
	return call_vop(inst, check_ready, type);
}

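/*
 * Feed the firmware: pick the first still-idle buffer from the OUTPUT
 * queue and pass it to the backend's process_output op. The CAPTURE
 * variant below is symmetric.
 */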
int vpu_process_output_buffer(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (!vpu_check_ready(inst, inst->out_format.type))
		return -EINVAL;

	v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
		vbuf = &buf->vb;
		if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_IDLE)
			break;
		vbuf = NULL;
	}

	if (!vbuf)
		return -EINVAL;

	dev_dbg(inst->dev, "[%d]frame id = %d / %d\n",
		inst->id, vbuf->sequence, inst->sequence);
	return call_vop(inst, process_output, &vbuf->vb2_buf);
}

int vpu_process_capture_buffer(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (!vpu_check_ready(inst, inst->cap_format.type))
		return -EINVAL;

	v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
		vbuf = &buf->vb;
		if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_IDLE)
			break;
		vbuf = NULL;
	}
	if (!vbuf)
		return -EINVAL;

	return call_vop(inst, process_capture, &vbuf->vb2_buf);
}

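/*
 * Return the next pending source buffer, completing and skipping any
 * codec-config (header-only) buffers at the head of the queue.
 */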
struct vb2_v4l2_buffer *vpu_next_src_buf(struct vpu_inst *inst)
{
	struct vb2_v4l2_buffer *src_buf = NULL;

	if (!inst->fh.m2m_ctx)
		return NULL;

	src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx);
	if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
		return NULL;

	while (vpu_vb_is_codecconfig(src_buf)) {
		v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx);
		vpu_set_buffer_state(src_buf, VPU_BUF_STATE_IDLE);
		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);

		src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx);
		if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
			return NULL;
	}

	return src_buf;
}

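/*
 * Complete up to 'count' consumed source buffers: decoded buffers are
 * returned as DONE, anything else as ERROR. Stops early on an idle
 * (not yet processed) buffer.
 */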
void vpu_skip_frame(struct vpu_inst *inst, int count)
{
	struct vb2_v4l2_buffer *src_buf;
	enum vb2_buffer_state state;
	int i = 0;

	if (count <= 0 || !inst->fh.m2m_ctx)
		return;

	while (i < count) {
		src_buf = v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx);
		if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
			return;
		if (vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_DECODED)
			state = VB2_BUF_STATE_DONE;
		else
			state = VB2_BUF_STATE_ERROR;
		i++;
		vpu_set_buffer_state(src_buf, VPU_BUF_STATE_IDLE);
		v4l2_m2m_buf_done(src_buf, state);
	}
}

struct vb2_v4l2_buffer *vpu_find_buf_by_sequence(struct vpu_inst *inst, u32 type, u32 sequence)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return NULL;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->sequence == sequence)
				break;
			vbuf = NULL;
		}
	} else {
		v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->sequence == sequence)
				break;
			vbuf = NULL;
		}
	}

	return vbuf;
}

struct vb2_v4l2_buffer *vpu_find_buf_by_idx(struct vpu_inst *inst, u32 type, u32 idx)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return NULL;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->vb2_buf.index == idx)
				break;
			vbuf = NULL;
		}
	} else {
		v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->vb2_buf.index == idx)
				break;
			vbuf = NULL;
		}
	}

	return vbuf;
}

int vpu_get_num_buffers(struct vpu_inst *inst, u32 type)
{
	struct vb2_queue *q;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (V4L2_TYPE_IS_OUTPUT(type))
		q = v4l2_m2m_get_src_vq(inst->fh.m2m_ctx);
	else
		q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);

	return q->num_buffers;
}

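/*
 * device_run is intentionally a no-op: buffers are handed to the firmware
 * directly from buf_queue (and other driver paths) rather than from the
 * m2m job scheduler.
 */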
static void vpu_m2m_device_run(void *priv)
{
}

static void vpu_m2m_job_abort(void *priv)
{
	struct vpu_inst *inst = priv;
	struct v4l2_m2m_ctx *m2m_ctx = inst->fh.m2m_ctx;

	v4l2_m2m_job_finish(m2m_ctx->m2m_dev, m2m_ctx);
}

static const struct v4l2_m2m_ops vpu_m2m_ops = {
	.device_run = vpu_m2m_device_run,
	.job_abort = vpu_m2m_job_abort
};

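/*
 * Validate a REQBUFS/CREATE_BUFS request against the current format: an
 * explicit plane layout must match it exactly; otherwise fill in the
 * defaults and enforce the per-queue minimum buffer count.
 */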
static int vpu_vb2_queue_setup(struct vb2_queue *vq,
			       unsigned int *buf_count,
			       unsigned int *plane_count,
			       unsigned int psize[],
			       struct device *allocators[])
{
	struct vpu_inst *inst = vb2_get_drv_priv(vq);
	struct vpu_format *cur_fmt;
	int i;

	cur_fmt = vpu_get_format(inst, vq->type);

	if (*plane_count) {
		if (*plane_count != cur_fmt->num_planes)
			return -EINVAL;
		for (i = 0; i < cur_fmt->num_planes; i++) {
			if (psize[i] < cur_fmt->sizeimage[i])
				return -EINVAL;
		}
		return 0;
	}

	if (V4L2_TYPE_IS_OUTPUT(vq->type))
		*buf_count = max_t(unsigned int, *buf_count, inst->min_buffer_out);
	else
		*buf_count = max_t(unsigned int, *buf_count, inst->min_buffer_cap);
	*plane_count = cur_fmt->num_planes;
	for (i = 0; i < cur_fmt->num_planes; i++)
		psize[i] = cur_fmt->sizeimage[i];

	return 0;
}

static int vpu_vb2_buf_init(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vpu_set_buffer_state(vbuf, VPU_BUF_STATE_IDLE);
	return 0;
}

static int vpu_vb2_buf_out_validate(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vbuf->field = V4L2_FIELD_NONE;

	return 0;
}

static int vpu_vb2_buf_prepare(struct vb2_buffer *vb)
{
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_format *cur_fmt;
	u32 i;

	cur_fmt = vpu_get_format(inst, vb->type);
	for (i = 0; i < cur_fmt->num_planes; i++) {
		if (vpu_get_vb_length(vb, i) < cur_fmt->sizeimage[i]) {
			dev_dbg(inst->dev, "[%d] %s buf[%d] is invalid\n",
				inst->id, vpu_type_name(vb->type), vb->index);
			vpu_set_buffer_state(vbuf, VPU_BUF_STATE_ERROR);
		}
	}

	return 0;
}

static void vpu_vb2_buf_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_queue *q = vb->vb2_queue;

	if (vbuf->flags & V4L2_BUF_FLAG_LAST)
		vpu_notify_eos(inst);

	if (list_empty(&q->done_list))
		call_void_vop(inst, on_queue_empty, q->type);
}

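/* Drain one m2m queue, returning every buffer to userspace in 'state'. */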
void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *buf;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		while ((buf = v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx))) {
			vpu_set_buffer_state(buf, VPU_BUF_STATE_IDLE);
			v4l2_m2m_buf_done(buf, state);
		}
	} else {
		while ((buf = v4l2_m2m_dst_buf_remove(inst->fh.m2m_ctx))) {
			vpu_set_buffer_state(buf, VPU_BUF_STATE_IDLE);
			v4l2_m2m_buf_done(buf, state);
		}
	}
}

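/*
 * First streamon on either queue registers the instance with a core; the
 * instance lock is dropped around vpu_inst_register() to avoid holding it
 * across core-level locking. On failure the just-queued buffers are handed
 * back in the QUEUED state.
 */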
static int vpu_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct vpu_inst *inst = vb2_get_drv_priv(q);
	struct vpu_format *fmt = vpu_get_format(inst, q->type);
	int ret;

	vpu_inst_unlock(inst);
	ret = vpu_inst_register(inst);
	vpu_inst_lock(inst);
	if (ret) {
		vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_QUEUED);
		return ret;
	}

	vpu_trace(inst->dev, "[%d] %s %c%c%c%c %dx%d %u(%u) %u(%u) %u(%u) %d\n",
		  inst->id, vpu_type_name(q->type),
		  fmt->pixfmt,
		  fmt->pixfmt >> 8,
		  fmt->pixfmt >> 16,
		  fmt->pixfmt >> 24,
		  fmt->width, fmt->height,
		  fmt->sizeimage[0], fmt->bytesperline[0],
		  fmt->sizeimage[1], fmt->bytesperline[1],
		  fmt->sizeimage[2], fmt->bytesperline[2],
		  q->num_buffers);
	vb2_clear_last_buffer_dequeued(q);
	ret = call_vop(inst, start, q->type);
	if (ret)
		vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_QUEUED);

	return ret;
}

static void vpu_vb2_stop_streaming(struct vb2_queue *q)
{
	struct vpu_inst *inst = vb2_get_drv_priv(q);

	vpu_trace(inst->dev, "[%d] %s\n", inst->id, vpu_type_name(q->type));

	call_void_vop(inst, stop, q->type);
	vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_ERROR);
	if (V4L2_TYPE_IS_OUTPUT(q->type))
		inst->sequence = 0;
}

static void vpu_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);

	if (V4L2_TYPE_IS_OUTPUT(vb->type))
		vbuf->sequence = inst->sequence++;

	v4l2_m2m_buf_queue(inst->fh.m2m_ctx, vbuf);
	vpu_process_output_buffer(inst);
	vpu_process_capture_buffer(inst);
}

static const struct vb2_ops vpu_vb2_ops = {
	.queue_setup        = vpu_vb2_queue_setup,
	.buf_init           = vpu_vb2_buf_init,
	.buf_out_validate   = vpu_vb2_buf_out_validate,
	.buf_prepare        = vpu_vb2_buf_prepare,
	.buf_finish         = vpu_vb2_buf_finish,
	.start_streaming    = vpu_vb2_start_streaming,
	.stop_streaming     = vpu_vb2_stop_streaming,
	.buf_queue          = vpu_vb2_buf_queue,
	.wait_prepare       = vb2_ops_wait_prepare,
	.wait_finish        = vb2_ops_wait_finish,
};

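/*
 * Both queues share vpu_vb2_ops and the instance mutex. Memory defaults to
 * dma-contig; the stream-buffer side (decoder OUTPUT, encoder CAPTURE)
 * switches to vmalloc when the instance uses an intermediate stream buffer.
 */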
static int vpu_m2m_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
{
	struct vpu_inst *inst = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	inst->out_format.type = src_vq->type;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->ops = &vpu_vb2_ops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	if (inst->type == VPU_CORE_TYPE_DEC && inst->use_stream_buffer)
		src_vq->mem_ops = &vb2_vmalloc_memops;
	src_vq->drv_priv = inst;
	src_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
	src_vq->min_buffers_needed = 1;
	src_vq->dev = inst->vpu->dev;
	src_vq->lock = &inst->lock;
	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	inst->cap_format.type = dst_vq->type;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->ops = &vpu_vb2_ops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	if (inst->type == VPU_CORE_TYPE_ENC && inst->use_stream_buffer)
		dst_vq->mem_ops = &vb2_vmalloc_memops;
	dst_vq->drv_priv = inst;
	dst_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
	dst_vq->min_buffers_needed = 1;
	dst_vq->dev = inst->vpu->dev;
	dst_vq->lock = &inst->lock;
	ret = vb2_queue_init(dst_vq);
	if (ret) {
		vb2_queue_release(src_vq);
		return ret;
	}

	return 0;
}

static int vpu_v4l2_release(struct vpu_inst *inst)
{
	vpu_trace(inst->vpu->dev, "%p\n", inst);

	vpu_release_core(inst->core);
	put_device(inst->dev);

	if (inst->workqueue) {
		cancel_work_sync(&inst->msg_work);
		destroy_workqueue(inst->workqueue);
		inst->workqueue = NULL;
	}

	v4l2_ctrl_handler_free(&inst->ctrl_handler);
	mutex_destroy(&inst->lock);
	v4l2_fh_del(&inst->fh);
	v4l2_fh_exit(&inst->fh);

	call_void_vop(inst, cleanup);

	return 0;
}

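/*
 * Common open path for the encoder and decoder nodes: take the first
 * reference on the instance, request a core, then set up controls, the
 * m2m context and the per-instance message workqueue. Errors unwind via
 * vpu_inst_put(), which ends up in vpu_v4l2_release().
 */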
int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
{
	struct vpu_dev *vpu = video_drvdata(file);
	struct vpu_func *func;
	int ret = 0;

	if (!inst || !inst->ops)
		return -EINVAL;

	if (inst->type == VPU_CORE_TYPE_ENC)
		func = &vpu->encoder;
	else
		func = &vpu->decoder;

	atomic_set(&inst->ref_count, 0);
	vpu_inst_get(inst);
	inst->vpu = vpu;
	inst->core = vpu_request_core(vpu, inst->type);
	if (inst->core)
		inst->dev = get_device(inst->core->dev);
	mutex_init(&inst->lock);
	INIT_LIST_HEAD(&inst->cmd_q);
	inst->id = VPU_INST_NULL_ID;
	inst->release = vpu_v4l2_release;
	inst->pid = current->pid;
	inst->tgid = current->tgid;
	inst->min_buffer_cap = 2;
	inst->min_buffer_out = 2;
	v4l2_fh_init(&inst->fh, func->vfd);
	v4l2_fh_add(&inst->fh);

	ret = call_vop(inst, ctrl_init);
	if (ret)
		goto error;

	inst->fh.m2m_ctx = v4l2_m2m_ctx_init(func->m2m_dev, inst, vpu_m2m_queue_init);
	if (IS_ERR(inst->fh.m2m_ctx)) {
		dev_err(vpu->dev, "v4l2_m2m_ctx_init failed\n");
		ret = PTR_ERR(inst->fh.m2m_ctx);
		goto error;
	}

	inst->fh.ctrl_handler = &inst->ctrl_handler;
	file->private_data = &inst->fh;
	inst->state = VPU_CODEC_STATE_DEINIT;
	inst->workqueue = alloc_workqueue("vpu_inst", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (inst->workqueue) {
		INIT_WORK(&inst->msg_work, vpu_inst_run_work);
		ret = kfifo_init(&inst->msg_fifo,
				 inst->msg_buffer,
				 rounddown_pow_of_two(sizeof(inst->msg_buffer)));
		if (ret) {
			destroy_workqueue(inst->workqueue);
			inst->workqueue = NULL;
		}
	}
	vpu_trace(vpu->dev, "tgid = %d, pid = %d, type = %s, inst = %p\n",
		  inst->tgid, inst->pid, vpu_core_type_desc(inst->type), inst);

	return 0;
error:
	vpu_inst_put(inst);
	return ret;
}

int vpu_v4l2_close(struct file *file)
{
	struct vpu_dev *vpu = video_drvdata(file);
	struct vpu_inst *inst = to_inst(file);

	vpu_trace(vpu->dev, "tgid = %d, pid = %d, inst = %p\n", inst->tgid, inst->pid, inst);

	vpu_inst_lock(inst);
	if (inst->fh.m2m_ctx) {
		v4l2_m2m_ctx_release(inst->fh.m2m_ctx);
		inst->fh.m2m_ctx = NULL;
	}
	vpu_inst_unlock(inst);

	call_void_vop(inst, release);
	vpu_inst_unregister(inst);
	vpu_inst_put(inst);

	return 0;
}

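/*
 * Create and register one video device (encoder or decoder) together with
 * its m2m device and media-controller entities. Safe to call twice: an
 * already-created func is left untouched.
 */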
int vpu_add_func(struct vpu_dev *vpu, struct vpu_func *func)
{
	struct video_device *vfd;
	int ret;

	if (!vpu || !func)
		return -EINVAL;

	if (func->vfd)
		return 0;

	func->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
	if (IS_ERR(func->m2m_dev)) {
		dev_err(vpu->dev, "v4l2_m2m_init failed\n");
		func->vfd = NULL;
		return PTR_ERR(func->m2m_dev);
	}

	vfd = video_device_alloc();
	if (!vfd) {
		v4l2_m2m_release(func->m2m_dev);
		dev_err(vpu->dev, "alloc vpu video device failed\n");
		return -ENOMEM;
	}
	vfd->release = video_device_release;
	vfd->vfl_dir = VFL_DIR_M2M;
	vfd->v4l2_dev = &vpu->v4l2_dev;
	vfd->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
	if (func->type == VPU_CORE_TYPE_ENC) {
		strscpy(vfd->name, "amphion-vpu-encoder", sizeof(vfd->name));
		vfd->fops = venc_get_fops();
		vfd->ioctl_ops = venc_get_ioctl_ops();
	} else {
		strscpy(vfd->name, "amphion-vpu-decoder", sizeof(vfd->name));
		vfd->fops = vdec_get_fops();
		vfd->ioctl_ops = vdec_get_ioctl_ops();
	}

	ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
	if (ret) {
		video_device_release(vfd);
		v4l2_m2m_release(func->m2m_dev);
		return ret;
	}
	video_set_drvdata(vfd, vpu);
	func->vfd = vfd;

	ret = v4l2_m2m_register_media_controller(func->m2m_dev, func->vfd, func->function);
	if (ret) {
		v4l2_m2m_release(func->m2m_dev);
		func->m2m_dev = NULL;
		video_unregister_device(func->vfd);
		func->vfd = NULL;
		return ret;
	}

	return 0;
}

void vpu_remove_func(struct vpu_func *func)
{
	if (!func)
		return;

	if (func->m2m_dev) {
		v4l2_m2m_unregister_media_controller(func->m2m_dev);
		v4l2_m2m_release(func->m2m_dev);
		func->m2m_dev = NULL;
	}
	if (func->vfd) {
		video_unregister_device(func->vfd);
		func->vfd = NULL;
	}
}