// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-vmalloc.h>
#include "vpu.h"
#include "vpu_core.h"
#include "vpu_v4l2.h"
#include "vpu_msgs.h"
#include "vpu_helpers.h"

void vpu_inst_lock(struct vpu_inst *inst)
{
	mutex_lock(&inst->lock);
}

void vpu_inst_unlock(struct vpu_inst *inst)
{
	mutex_unlock(&inst->lock);
}

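/*
 * Plane accessors: both helpers honour the plane's data_offset, so the
 * returned DMA address/length describe the payload rather than the start of
 * the vb2 plane. An out-of-range plane index yields 0.
 */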
dma_addr_t vpu_get_vb_phy_addr(struct vb2_buffer *vb, u32 plane_no)
{
	if (plane_no >= vb->num_planes)
		return 0;
	return vb2_dma_contig_plane_dma_addr(vb, plane_no) +
			vb->planes[plane_no].data_offset;
}

unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no)
{
	if (plane_no >= vb->num_planes)
		return 0;
	return vb2_plane_size(vb, plane_no) - vb->planes[plane_no].data_offset;
}

void vpu_set_buffer_state(struct vb2_v4l2_buffer *vbuf, unsigned int state)
{
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);

	vpu_buf->state = state;
}

unsigned int vpu_get_buffer_state(struct vb2_v4l2_buffer *vbuf)
{
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);

	return vpu_buf->state;
}

void vpu_v4l2_set_error(struct vpu_inst *inst)
{
	struct vb2_queue *src_q;
	struct vb2_queue *dst_q;

	vpu_inst_lock(inst);
	dev_err(inst->dev, "error occurred in codec\n");
	if (inst->fh.m2m_ctx) {
		src_q = v4l2_m2m_get_src_vq(inst->fh.m2m_ctx);
		dst_q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
		src_q->error = 1;
		dst_q->error = 1;
		wake_up(&src_q->done_wq);
		wake_up(&dst_q->done_wq);
	}
	vpu_inst_unlock(inst);
}

int vpu_notify_eos(struct vpu_inst *inst)
{
	static const struct v4l2_event ev = {
		.id = 0,
		.type = V4L2_EVENT_EOS
	};

	vpu_trace(inst->dev, "[%d]\n", inst->id);
	v4l2_event_queue_fh(&inst->fh, &ev);

	return 0;
}

int vpu_notify_source_change(struct vpu_inst *inst)
{
	static const struct v4l2_event ev = {
		.id = 0,
		.type = V4L2_EVENT_SOURCE_CHANGE,
		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION
	};

	vpu_trace(inst->dev, "[%d]\n", inst->id);
	v4l2_event_queue_fh(&inst->fh, &ev);
	return 0;
}

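/*
 * Mark the capture queue's last buffer as dequeued once its done list has
 * drained, wake up any poll/dqbuf waiters and raise the V4L2 EOS event.
 */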
int vpu_set_last_buffer_dequeued(struct vpu_inst *inst)
{
	struct vb2_queue *q;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
	if (!list_empty(&q->done_list))
		return -EINVAL;

	if (q->last_buffer_dequeued)
		return 0;
	vpu_trace(inst->dev, "last buffer dequeued\n");
	q->last_buffer_dequeued = true;
	wake_up(&q->done_wq);
	vpu_notify_eos(inst);
	return 0;
}

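/*
 * The OUTPUT side is considered empty when no queued source buffer is still
 * in the IDLE state, i.e. nothing remains that has not been handed to the
 * firmware yet.
 */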
bool vpu_is_source_empty(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;

	if (!inst->fh.m2m_ctx)
		return true;
	v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
		if (vpu_get_buffer_state(&buf->vb) == VPU_BUF_STATE_IDLE)
			return false;
	}
	return true;
}

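/*
 * Clamp a userspace format to something this instance supports: fall back to
 * the first enumerated pixelformat if the requested one is unknown, limit
 * width/height to the core's valid range, and derive bytesperline/sizeimage
 * per plane, keeping the caller's sizeimage when it is already larger.
 */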
const struct vpu_format *vpu_try_fmt_common(struct vpu_inst *inst, struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
	u32 type = f->type;
	u32 stride = 1;
	u32 bytesperline;
	u32 sizeimage;
	const struct vpu_format *fmt;
	const struct vpu_core_resources *res;
	int i;

	fmt = vpu_helper_find_format(inst, type, pixmp->pixelformat);
	if (!fmt) {
		fmt = vpu_helper_enum_format(inst, type, 0);
		if (!fmt)
			return NULL;
		pixmp->pixelformat = fmt->pixfmt;
	}

	res = vpu_get_resource(inst);
	if (res)
		stride = res->stride;
	if (pixmp->width)
		pixmp->width = vpu_helper_valid_frame_width(inst, pixmp->width);
	if (pixmp->height)
		pixmp->height = vpu_helper_valid_frame_height(inst, pixmp->height);
	pixmp->flags = fmt->flags;
	pixmp->num_planes = fmt->num_planes;
	if (pixmp->field == V4L2_FIELD_ANY)
		pixmp->field = V4L2_FIELD_NONE;
	for (i = 0; i < pixmp->num_planes; i++) {
		bytesperline = max_t(s32, pixmp->plane_fmt[i].bytesperline, 0);
		sizeimage = vpu_helper_get_plane_size(pixmp->pixelformat,
						      pixmp->width,
						      pixmp->height,
						      i,
						      stride,
						      pixmp->field > V4L2_FIELD_NONE ? 1 : 0,
						      &bytesperline);
		sizeimage = max_t(s32, pixmp->plane_fmt[i].sizeimage, sizeimage);
		pixmp->plane_fmt[i].bytesperline = bytesperline;
		pixmp->plane_fmt[i].sizeimage = sizeimage;
	}

	return fmt;
}

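/*
 * Processing is allowed only once the instance has left
 * VPU_CODEC_STATE_DEINIT and owns a valid id; beyond that the optional
 * codec-specific check_ready() hook has the final say.
 */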
static bool vpu_check_ready(struct vpu_inst *inst, u32 type)
{
	if (!inst)
		return false;
	if (inst->state == VPU_CODEC_STATE_DEINIT || inst->id < 0)
		return false;
	if (!inst->ops->check_ready)
		return true;
	return call_vop(inst, check_ready, type);
}

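/*
 * Hand the first still-idle OUTPUT buffer to the codec-specific
 * process_output() hook, provided the instance is ready for that queue.
 */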
int vpu_process_output_buffer(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (!vpu_check_ready(inst, inst->out_format.type))
		return -EINVAL;

	v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
		vbuf = &buf->vb;
		if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_IDLE)
			break;
		vbuf = NULL;
	}

	if (!vbuf)
		return -EINVAL;

	dev_dbg(inst->dev, "[%d]frame id = %d / %d\n",
		inst->id, vbuf->sequence, inst->sequence);
	return call_vop(inst, process_output, &vbuf->vb2_buf);
}

int vpu_process_capture_buffer(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (!vpu_check_ready(inst, inst->cap_format.type))
		return -EINVAL;

	v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
		vbuf = &buf->vb;
		if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_IDLE)
			break;
		vbuf = NULL;
	}
	if (!vbuf)
		return -EINVAL;

	return call_vop(inst, process_capture, &vbuf->vb2_buf);
}

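/*
 * Return the head of the source queue, completing and dropping any
 * codec-config (header) buffers found there first; NULL is returned when the
 * queue is empty or its head buffer is still idle.
 */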
struct vb2_v4l2_buffer *vpu_next_src_buf(struct vpu_inst *inst)
{
	struct vb2_v4l2_buffer *src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx);

	if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
		return NULL;

	while (vpu_vb_is_codecconfig(src_buf)) {
		v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx);
		vpu_set_buffer_state(src_buf, VPU_BUF_STATE_IDLE);
		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);

		src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx);
		if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
			return NULL;
	}

	return src_buf;
}

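/*
 * Complete up to @count source buffers without producing capture output:
 * buffers already decoded finish as DONE, the rest as ERROR; iteration stops
 * early if the queue runs dry or an idle buffer is reached.
 */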
void vpu_skip_frame(struct vpu_inst *inst, int count)
{
	struct vb2_v4l2_buffer *src_buf;
	enum vb2_buffer_state state;
	int i = 0;

	if (count <= 0)
		return;

	while (i < count) {
		src_buf = v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx);
		if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
			return;
		if (vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_DECODED)
			state = VB2_BUF_STATE_DONE;
		else
			state = VB2_BUF_STATE_ERROR;
		i++;
		vpu_set_buffer_state(src_buf, VPU_BUF_STATE_IDLE);
		v4l2_m2m_buf_done(src_buf, state);
	}
}

struct vb2_v4l2_buffer *vpu_find_buf_by_sequence(struct vpu_inst *inst, u32 type, u32 sequence)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return NULL;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->sequence == sequence)
				break;
			vbuf = NULL;
		}
	} else {
		v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->sequence == sequence)
				break;
			vbuf = NULL;
		}
	}

	return vbuf;
}

struct vb2_v4l2_buffer *vpu_find_buf_by_idx(struct vpu_inst *inst, u32 type, u32 idx)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return NULL;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->vb2_buf.index == idx)
				break;
			vbuf = NULL;
		}
	} else {
		v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->vb2_buf.index == idx)
				break;
			vbuf = NULL;
		}
	}

	return vbuf;
}

int vpu_get_num_buffers(struct vpu_inst *inst, u32 type)
{
	struct vb2_queue *q;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (V4L2_TYPE_IS_OUTPUT(type))
		q = v4l2_m2m_get_src_vq(inst->fh.m2m_ctx);
	else
		q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);

	return q->num_buffers;
}

static void vpu_m2m_device_run(void *priv)
{
}

static void vpu_m2m_job_abort(void *priv)
{
	struct vpu_inst *inst = priv;
	struct v4l2_m2m_ctx *m2m_ctx = inst->fh.m2m_ctx;

	v4l2_m2m_job_finish(m2m_ctx->m2m_dev, m2m_ctx);
}

static const struct v4l2_m2m_ops vpu_m2m_ops = {
	.device_run = vpu_m2m_device_run,
	.job_abort = vpu_m2m_job_abort,
};

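/*
 * vb2 queue_setup: validate a caller-supplied plane layout against the
 * current format, or fill in the plane count and sizes from it while
 * enforcing the instance's minimum buffer counts.
 */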
static int vpu_vb2_queue_setup(struct vb2_queue *vq,
			       unsigned int *buf_count,
			       unsigned int *plane_count,
			       unsigned int psize[],
			       struct device *allocators[])
{
	struct vpu_inst *inst = vb2_get_drv_priv(vq);
	struct vpu_format *cur_fmt;
	int i;

	cur_fmt = vpu_get_format(inst, vq->type);

	if (*plane_count) {
		if (*plane_count != cur_fmt->num_planes)
			return -EINVAL;
		for (i = 0; i < cur_fmt->num_planes; i++) {
			if (psize[i] < cur_fmt->sizeimage[i])
				return -EINVAL;
		}
		return 0;
	}

	if (V4L2_TYPE_IS_OUTPUT(vq->type))
		*buf_count = max_t(unsigned int, *buf_count, inst->min_buffer_out);
	else
		*buf_count = max_t(unsigned int, *buf_count, inst->min_buffer_cap);
	*plane_count = cur_fmt->num_planes;
	for (i = 0; i < cur_fmt->num_planes; i++)
		psize[i] = cur_fmt->sizeimage[i];

	return 0;
}

static int vpu_vb2_buf_init(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vpu_set_buffer_state(vbuf, VPU_BUF_STATE_IDLE);
	return 0;
}

static int vpu_vb2_buf_out_validate(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vbuf->field = V4L2_FIELD_NONE;

	return 0;
}

static int vpu_vb2_buf_prepare(struct vb2_buffer *vb)
{
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_format *cur_fmt;
	u32 i;

	cur_fmt = vpu_get_format(inst, vb->type);
	for (i = 0; i < cur_fmt->num_planes; i++) {
		if (vpu_get_vb_length(vb, i) < cur_fmt->sizeimage[i]) {
			dev_dbg(inst->dev, "[%d] %s buf[%d] is invalid\n",
				inst->id, vpu_type_name(vb->type), vb->index);
			vpu_set_buffer_state(vbuf, VPU_BUF_STATE_ERROR);
		}
	}

	return 0;
}

static void vpu_vb2_buf_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_queue *q = vb->vb2_queue;

	if (vbuf->flags & V4L2_BUF_FLAG_LAST)
		vpu_notify_eos(inst);

	if (list_empty(&q->done_list))
		call_void_vop(inst, on_queue_empty, q->type);
}

void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *buf;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		while ((buf = v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx))) {
			vpu_set_buffer_state(buf, VPU_BUF_STATE_IDLE);
			v4l2_m2m_buf_done(buf, state);
		}
	} else {
		while ((buf = v4l2_m2m_dst_buf_remove(inst->fh.m2m_ctx))) {
			vpu_set_buffer_state(buf, VPU_BUF_STATE_IDLE);
			v4l2_m2m_buf_done(buf, state);
		}
	}
}

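/*
 * Register the instance with a VPU core before asking the codec to start the
 * queue; the instance lock doubles as the vb2 queue lock, so it is dropped
 * around registration. On failure all queued buffers are handed back in the
 * QUEUED state.
 */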
static int vpu_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct vpu_inst *inst = vb2_get_drv_priv(q);
	struct vpu_format *fmt = vpu_get_format(inst, q->type);
	int ret;

	vpu_inst_unlock(inst);
	ret = vpu_inst_register(inst);
	vpu_inst_lock(inst);
	if (ret) {
		vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_QUEUED);
		return ret;
	}

	vpu_trace(inst->dev, "[%d] %s %c%c%c%c %dx%d %u(%u) %u(%u) %u(%u) %d\n",
		  inst->id, vpu_type_name(q->type),
		  fmt->pixfmt,
		  fmt->pixfmt >> 8,
		  fmt->pixfmt >> 16,
		  fmt->pixfmt >> 24,
		  fmt->width, fmt->height,
		  fmt->sizeimage[0], fmt->bytesperline[0],
		  fmt->sizeimage[1], fmt->bytesperline[1],
		  fmt->sizeimage[2], fmt->bytesperline[2],
		  q->num_buffers);
	vb2_clear_last_buffer_dequeued(q);
	ret = call_vop(inst, start, q->type);
	if (ret)
		vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_QUEUED);

	return ret;
}

static void vpu_vb2_stop_streaming(struct vb2_queue *q)
{
	struct vpu_inst *inst = vb2_get_drv_priv(q);

	vpu_trace(inst->dev, "[%d] %s\n", inst->id, vpu_type_name(q->type));

	call_void_vop(inst, stop, q->type);
	vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_ERROR);
	if (V4L2_TYPE_IS_OUTPUT(q->type))
		inst->sequence = 0;
}

static void vpu_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);

	if (V4L2_TYPE_IS_OUTPUT(vb->type))
		vbuf->sequence = inst->sequence++;

	v4l2_m2m_buf_queue(inst->fh.m2m_ctx, vbuf);
	vpu_process_output_buffer(inst);
	vpu_process_capture_buffer(inst);
}

static const struct vb2_ops vpu_vb2_ops = {
	.queue_setup = vpu_vb2_queue_setup,
	.buf_init = vpu_vb2_buf_init,
	.buf_out_validate = vpu_vb2_buf_out_validate,
	.buf_prepare = vpu_vb2_buf_prepare,
	.buf_finish = vpu_vb2_buf_finish,
	.start_streaming = vpu_vb2_start_streaming,
	.stop_streaming = vpu_vb2_stop_streaming,
	.buf_queue = vpu_vb2_buf_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

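/*
 * m2m queue init: both queues share the instance lock and the vpu_vb2_ops.
 * The compressed-stream side switches to the vmalloc allocator when
 * use_stream_buffer is set (decoder OUTPUT, encoder CAPTURE); frame buffers
 * stay with dma-contig.
 */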
static int vpu_m2m_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
{
	struct vpu_inst *inst = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	inst->out_format.type = src_vq->type;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->ops = &vpu_vb2_ops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	if (inst->type == VPU_CORE_TYPE_DEC && inst->use_stream_buffer)
		src_vq->mem_ops = &vb2_vmalloc_memops;
	src_vq->drv_priv = inst;
	src_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
	src_vq->min_buffers_needed = 1;
	src_vq->dev = inst->vpu->dev;
	src_vq->lock = &inst->lock;
	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	inst->cap_format.type = dst_vq->type;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->ops = &vpu_vb2_ops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	if (inst->type == VPU_CORE_TYPE_ENC && inst->use_stream_buffer)
		dst_vq->mem_ops = &vb2_vmalloc_memops;
	dst_vq->drv_priv = inst;
	dst_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
	dst_vq->min_buffers_needed = 1;
	dst_vq->dev = inst->vpu->dev;
	dst_vq->lock = &inst->lock;
	ret = vb2_queue_init(dst_vq);
	if (ret) {
		vb2_queue_release(src_vq);
		return ret;
	}

	return 0;
}

static int vpu_v4l2_release(struct vpu_inst *inst)
{
	vpu_trace(inst->vpu->dev, "%p\n", inst);

	vpu_release_core(inst->core);
	put_device(inst->dev);

	if (inst->workqueue) {
		cancel_work_sync(&inst->msg_work);
		destroy_workqueue(inst->workqueue);
		inst->workqueue = NULL;
	}

	v4l2_ctrl_handler_free(&inst->ctrl_handler);
	mutex_destroy(&inst->lock);
	v4l2_fh_del(&inst->fh);
	v4l2_fh_exit(&inst->fh);

	call_void_vop(inst, cleanup);

	return 0;
}

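/*
 * Common open path for the encoder and decoder devices: bind the instance to
 * a core, initialize its controls, m2m context and per-instance message
 * workqueue/fifo, and expose it through the file handle. On error the
 * reference taken at the start is dropped again.
 */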
int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
{
	struct vpu_dev *vpu = video_drvdata(file);
	struct vpu_func *func;
	int ret = 0;

	if (!inst || !inst->ops)
		return -EINVAL;

	if (inst->type == VPU_CORE_TYPE_ENC)
		func = &vpu->encoder;
	else
		func = &vpu->decoder;

	atomic_set(&inst->ref_count, 0);
	vpu_inst_get(inst);
	inst->vpu = vpu;
	inst->core = vpu_request_core(vpu, inst->type);
	if (inst->core)
		inst->dev = get_device(inst->core->dev);
	mutex_init(&inst->lock);
	INIT_LIST_HEAD(&inst->cmd_q);
	inst->id = VPU_INST_NULL_ID;
	inst->release = vpu_v4l2_release;
	inst->pid = current->pid;
	inst->tgid = current->tgid;
	inst->min_buffer_cap = 2;
	inst->min_buffer_out = 2;
	v4l2_fh_init(&inst->fh, func->vfd);
	v4l2_fh_add(&inst->fh);

	ret = call_vop(inst, ctrl_init);
	if (ret)
		goto error;

	inst->fh.m2m_ctx = v4l2_m2m_ctx_init(func->m2m_dev, inst, vpu_m2m_queue_init);
	if (IS_ERR(inst->fh.m2m_ctx)) {
		dev_err(vpu->dev, "v4l2_m2m_ctx_init fail\n");
		ret = PTR_ERR(inst->fh.m2m_ctx);
		goto error;
	}

	inst->fh.ctrl_handler = &inst->ctrl_handler;
	file->private_data = &inst->fh;
	inst->state = VPU_CODEC_STATE_DEINIT;
	inst->workqueue = alloc_workqueue("vpu_inst", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (inst->workqueue) {
		INIT_WORK(&inst->msg_work, vpu_inst_run_work);
		ret = kfifo_init(&inst->msg_fifo,
				 inst->msg_buffer,
				 rounddown_pow_of_two(sizeof(inst->msg_buffer)));
		if (ret) {
			destroy_workqueue(inst->workqueue);
			inst->workqueue = NULL;
		}
	}
	vpu_trace(vpu->dev, "tgid = %d, pid = %d, type = %s, inst = %p\n",
		  inst->tgid, inst->pid, vpu_core_type_desc(inst->type), inst);

	return 0;
error:
	vpu_inst_put(inst);
	return ret;
}

int vpu_v4l2_close(struct file *file)
{
	struct vpu_dev *vpu = video_drvdata(file);
	struct vpu_inst *inst = to_inst(file);

	vpu_trace(vpu->dev, "tgid = %d, pid = %d, inst = %p\n", inst->tgid, inst->pid, inst);

	vpu_inst_lock(inst);
	if (inst->fh.m2m_ctx) {
		v4l2_m2m_ctx_release(inst->fh.m2m_ctx);
		inst->fh.m2m_ctx = NULL;
	}
	vpu_inst_unlock(inst);

	call_void_vop(inst, release);
	vpu_inst_unregister(inst);
	vpu_inst_put(inst);

	return 0;
}

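/*
 * Create and register the video device plus the v4l2-mem2mem device for one
 * function (encoder or decoder), wiring in the corresponding fops/ioctl ops
 * and the media-controller entities.
 */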
int vpu_add_func(struct vpu_dev *vpu, struct vpu_func *func)
{
	struct video_device *vfd;
	int ret;

	if (!vpu || !func)
		return -EINVAL;

	if (func->vfd)
		return 0;

	func->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
	if (IS_ERR(func->m2m_dev)) {
		dev_err(vpu->dev, "v4l2_m2m_init fail\n");
		func->vfd = NULL;
		return PTR_ERR(func->m2m_dev);
	}

	vfd = video_device_alloc();
	if (!vfd) {
		v4l2_m2m_release(func->m2m_dev);
		dev_err(vpu->dev, "alloc vpu decoder video device fail\n");
		return -ENOMEM;
	}
	vfd->release = video_device_release;
	vfd->vfl_dir = VFL_DIR_M2M;
	vfd->v4l2_dev = &vpu->v4l2_dev;
	vfd->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
	if (func->type == VPU_CORE_TYPE_ENC) {
		strscpy(vfd->name, "amphion-vpu-encoder", sizeof(vfd->name));
		vfd->fops = venc_get_fops();
		vfd->ioctl_ops = venc_get_ioctl_ops();
	} else {
		strscpy(vfd->name, "amphion-vpu-decoder", sizeof(vfd->name));
		vfd->fops = vdec_get_fops();
		vfd->ioctl_ops = vdec_get_ioctl_ops();
	}

	ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
	if (ret) {
		video_device_release(vfd);
		v4l2_m2m_release(func->m2m_dev);
		return ret;
	}
	video_set_drvdata(vfd, vpu);
	func->vfd = vfd;

	ret = v4l2_m2m_register_media_controller(func->m2m_dev, func->vfd, func->function);
	if (ret) {
		v4l2_m2m_release(func->m2m_dev);
		func->m2m_dev = NULL;
		video_unregister_device(func->vfd);
		func->vfd = NULL;
		return ret;
	}

	return 0;
}

void vpu_remove_func(struct vpu_func *func)
{
	if (!func)
		return;

	if (func->m2m_dev) {
		v4l2_m2m_unregister_media_controller(func->m2m_dev);
		v4l2_m2m_release(func->m2m_dev);
		func->m2m_dev = NULL;
	}
	if (func->vfd) {
		video_unregister_device(func->vfd);
		func->vfd = NULL;
	}
}