// SPDX-License-Identifier: GPL-2.0
/*
 * Hantro VPU codec driver
 *
 * Copyright (C) 2018 Collabora, Ltd.
 * Copyright 2018 Google LLC.
 *	Tomasz Figa <tfiga@chromium.org>
 *
 * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/workqueue.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>

#include "hantro_v4l2.h"
#include "hantro.h"
#include "hantro_hw.h"

#define DRIVER_NAME "hantro-vpu"

int hantro_debug;
module_param_named(debug, hantro_debug, int, 0644);
MODULE_PARM_DESC(debug,
		 "Debug level - higher value produces more verbose messages");

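/*
 * Return the current payload of control @id from this context's control
 * handler, or NULL if the control is not registered for the context.
 */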
void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id)
{
	struct v4l2_ctrl *ctrl;

	ctrl = v4l2_ctrl_find(&ctx->ctrl_handler, id);
	return ctrl ? ctrl->p_cur.p : NULL;
}

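/*
 * Look up the CAPTURE buffer whose timestamp matches @ts and return the
 * DMA address of the decoded frame, or 0 if no such buffer exists.
 */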
dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
{
	struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
	struct vb2_buffer *buf;
	int index;

	index = vb2_find_timestamp(q, ts, 0);
	if (index < 0)
		return 0;
	buf = vb2_get_buffer(q, index);
	return hantro_get_dec_buf_addr(ctx, buf);
}

static const struct v4l2_event hantro_eos_event = {
	.type = V4L2_EVENT_EOS
};

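/*
 * Finish the current mem2mem job without touching runtime PM or clocks:
 * assign sequence numbers, signal EOS on the last draining source buffer
 * and hand both buffers back to vb2 with the given result.
 */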
static void hantro_job_finish_no_pm(struct hantro_dev *vpu,
				    struct hantro_ctx *ctx,
				    enum vb2_buffer_state result)
{
	struct vb2_v4l2_buffer *src, *dst;

	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	if (WARN_ON(!src))
		return;
	if (WARN_ON(!dst))
		return;

	src->sequence = ctx->sequence_out++;
	dst->sequence = ctx->sequence_cap++;

	if (v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, src)) {
		dst->flags |= V4L2_BUF_FLAG_LAST;
		v4l2_event_queue_fh(&ctx->fh, &hantro_eos_event);
		v4l2_m2m_mark_stopped(ctx->fh.m2m_ctx);
	}

	v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
					 result);
}

static void hantro_job_finish(struct hantro_dev *vpu,
			      struct hantro_ctx *ctx,
			      enum vb2_buffer_state result)
{
	pm_runtime_mark_last_busy(vpu->dev);
	pm_runtime_put_autosuspend(vpu->dev);

	clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);

	hantro_job_finish_no_pm(vpu, ctx, result);
}

void hantro_irq_done(struct hantro_dev *vpu,
		     enum vb2_buffer_state result)
{
	struct hantro_ctx *ctx =
		v4l2_m2m_get_curr_priv(vpu->m2m_dev);

	/*
	 * If cancel_delayed_work returns false
	 * the timeout expired. The watchdog is running,
	 * and will take care of finishing the job.
	 */
	if (cancel_delayed_work(&vpu->watchdog_work)) {
		if (result == VB2_BUF_STATE_DONE && ctx->codec_ops->done)
			ctx->codec_ops->done(ctx);
		hantro_job_finish(vpu, ctx, result);
	}
}

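/*
 * Watchdog handler: the hardware failed to deliver an interrupt within the
 * timeout, so reset the codec core and complete the job with an error.
 */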
void hantro_watchdog(struct work_struct *work)
{
	struct hantro_dev *vpu;
	struct hantro_ctx *ctx;

	vpu = container_of(to_delayed_work(work),
			   struct hantro_dev, watchdog_work);
	ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
	if (ctx) {
		vpu_err("frame processing timed out!\n");
		ctx->codec_ops->reset(ctx);
		hantro_job_finish(vpu, ctx, VB2_BUF_STATE_ERROR);
	}
}

void hantro_start_prepare_run(struct hantro_ctx *ctx)
{
	struct vb2_v4l2_buffer *src_buf;

	src_buf = hantro_get_src_buf(ctx);
	v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
				&ctx->ctrl_handler);

	if (!ctx->is_encoder && !ctx->dev->variant->late_postproc) {
		if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
			hantro_postproc_enable(ctx);
		else
			hantro_postproc_disable(ctx);
	}
}

void hantro_end_prepare_run(struct hantro_ctx *ctx)
{
	struct vb2_v4l2_buffer *src_buf;

	if (!ctx->is_encoder && ctx->dev->variant->late_postproc) {
		if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
			hantro_postproc_enable(ctx);
		else
			hantro_postproc_disable(ctx);
	}

	src_buf = hantro_get_src_buf(ctx);
	v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
				   &ctx->ctrl_handler);

	/* Kick the watchdog. */
	schedule_delayed_work(&ctx->dev->watchdog_work,
			      msecs_to_jiffies(2000));
}

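/*
 * mem2mem device_run callback: resume the device, enable the clocks, copy
 * source buffer metadata to the destination and start the codec-specific
 * run hook. On any failure the job is finished with an error state.
 */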
static void device_run(void *priv)
{
	struct hantro_ctx *ctx = priv;
	struct vb2_v4l2_buffer *src, *dst;
	int ret;

	src = hantro_get_src_buf(ctx);
	dst = hantro_get_dst_buf(ctx);

	ret = pm_runtime_resume_and_get(ctx->dev->dev);
	if (ret < 0)
		goto err_cancel_job;

	ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
	if (ret)
		goto err_cancel_job;

	v4l2_m2m_buf_copy_metadata(src, dst, true);

	if (ctx->codec_ops->run(ctx))
		goto err_cancel_job;

	return;

err_cancel_job:
	hantro_job_finish_no_pm(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
}

static const struct v4l2_m2m_ops vpu_m2m_ops = {
	.device_run = device_run,
};

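/*
 * Initialize the OUTPUT (source) and CAPTURE (destination) vb2 queues of a
 * newly opened context. Both queues use DMA-contiguous memory; only the
 * source queue supports requests.
 */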
static int
queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
{
	struct hantro_ctx *ctx = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->drv_priv = ctx;
	src_vq->ops = &hantro_queue_ops;
	src_vq->mem_ops = &vb2_dma_contig_memops;

	/*
	 * Driver does mostly sequential access, so sacrifice TLB efficiency
	 * for faster allocation. Also, no CPU access on the source queue,
	 * so no kernel mapping needed.
	 */
	src_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
			    DMA_ATTR_NO_KERNEL_MAPPING;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &ctx->dev->vpu_mutex;
	src_vq->dev = ctx->dev->v4l2_dev.dev;
	src_vq->supports_requests = true;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->bidirectional = true;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
	/*
	 * The Kernel needs access to the JPEG destination buffer for the
	 * JPEG encoder to fill in the JPEG headers.
	 */
	if (!ctx->is_encoder)
		dst_vq->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->drv_priv = ctx;
	dst_vq->ops = &hantro_queue_ops;
	dst_vq->buf_struct_size = sizeof(struct hantro_decoded_buffer);
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->lock = &ctx->dev->vpu_mutex;
	dst_vq->dev = ctx->dev->v4l2_dev.dev;

	return vb2_queue_init(dst_vq);
}

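/*
 * Validate codec headers passed through stateless controls and reject
 * stream parameters the hardware cannot decode (chroma format, bit depth,
 * VP9 profile).
 */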
static int hantro_try_ctrl(struct v4l2_ctrl *ctrl)
{
	if (ctrl->id == V4L2_CID_STATELESS_H264_SPS) {
		const struct v4l2_ctrl_h264_sps *sps = ctrl->p_new.p_h264_sps;

		if (sps->chroma_format_idc > 1)
			/* Only 4:0:0 and 4:2:0 are supported */
			return -EINVAL;
		if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
			/* Luma and chroma bit depth mismatch */
			return -EINVAL;
		if (sps->bit_depth_luma_minus8 != 0)
			/* Only 8-bit is supported */
			return -EINVAL;
	} else if (ctrl->id == V4L2_CID_MPEG_VIDEO_HEVC_SPS) {
		const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;

		if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
			/* Luma and chroma bit depth mismatch */
			return -EINVAL;
		if (sps->bit_depth_luma_minus8 != 0)
			/* Only 8-bit is supported */
			return -EINVAL;
	} else if (ctrl->id == V4L2_CID_STATELESS_VP9_FRAME) {
		const struct v4l2_ctrl_vp9_frame *dec_params = ctrl->p_new.p_vp9_frame;

		/* We only support profile 0 */
		if (dec_params->profile != 0)
			return -EINVAL;
	}
	return 0;
}

static int hantro_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct hantro_ctx *ctx;

	ctx = container_of(ctrl->handler,
			   struct hantro_ctx, ctrl_handler);

	vpu_debug(1, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);

	switch (ctrl->id) {
	case V4L2_CID_JPEG_COMPRESSION_QUALITY:
		ctx->jpeg_quality = ctrl->val;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hantro_hevc_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct hantro_ctx *ctx;

	ctx = container_of(ctrl->handler,
			   struct hantro_ctx, ctrl_handler);

	vpu_debug(1, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);

	switch (ctrl->id) {
	case V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP:
		ctx->hevc_dec.ctrls.hevc_hdr_skip_length = ctrl->val;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct v4l2_ctrl_ops hantro_ctrl_ops = {
	.try_ctrl = hantro_try_ctrl,
};

static const struct v4l2_ctrl_ops hantro_jpeg_ctrl_ops = {
	.s_ctrl = hantro_jpeg_s_ctrl,
};

static const struct v4l2_ctrl_ops hantro_hevc_ctrl_ops = {
	.s_ctrl = hantro_hevc_s_ctrl,
};

#define HANTRO_JPEG_ACTIVE_MARKERS	(V4L2_JPEG_ACTIVE_MARKER_APP0 | \
					 V4L2_JPEG_ACTIVE_MARKER_COM | \
					 V4L2_JPEG_ACTIVE_MARKER_DQT | \
					 V4L2_JPEG_ACTIVE_MARKER_DHT)

static const struct hantro_ctrl controls[] = {
	{
		.codec = HANTRO_JPEG_ENCODER,
		.cfg = {
			.id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
			.min = 5,
			.max = 100,
			.step = 1,
			.def = 50,
			.ops = &hantro_jpeg_ctrl_ops,
		},
	}, {
		.codec = HANTRO_JPEG_ENCODER,
		.cfg = {
			.id = V4L2_CID_JPEG_ACTIVE_MARKER,
			.max = HANTRO_JPEG_ACTIVE_MARKERS,
			.def = HANTRO_JPEG_ACTIVE_MARKERS,
			/*
			 * Changing the set of active markers/segments also
			 * messes up the alignment of the JPEG header, which
			 * is needed to allow the hardware to write directly
			 * to the output buffer. Implementing this introduces
			 * a lot of complexity for little gain, as the enabled
			 * markers are already the minimum required set.
			 */
			.flags = V4L2_CTRL_FLAG_READ_ONLY,
		},
	}, {
		.codec = HANTRO_MPEG2_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_MPEG2_SEQUENCE,
		},
	}, {
		.codec = HANTRO_MPEG2_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_MPEG2_PICTURE,
		},
	}, {
		.codec = HANTRO_MPEG2_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_MPEG2_QUANTISATION,
		},
	}, {
		.codec = HANTRO_VP8_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_VP8_FRAME,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_SPS,
			.ops = &hantro_ctrl_ops,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_PPS,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_DECODE_MODE,
			.min = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
			.def = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
			.max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_START_CODE,
			.min = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
			.def = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
			.max = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
			.min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
			.max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
			.menu_skip_mask =
			BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
			.def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
		}
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE,
			.min = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED,
			.max = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED,
			.def = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_HEVC_START_CODE,
			.min = V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B,
			.max = V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B,
			.def = V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
			.min = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
			.max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
			.def = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
			.min = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
			.max = V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_HEVC_SPS,
			.ops = &hantro_ctrl_ops,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_HEVC_PPS,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP,
			.name = "Hantro HEVC slice header skip bytes",
			.type = V4L2_CTRL_TYPE_INTEGER,
			.min = 0,
			.def = 0,
			.max = 0x100,
			.step = 1,
			.ops = &hantro_hevc_ctrl_ops,
		},
	}, {
		.codec = HANTRO_VP9_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_VP9_FRAME,
		},
	}, {
		.codec = HANTRO_VP9_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_VP9_COMPRESSED_HDR,
		},
	},
};

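/*
 * Register every control that applies to the codecs allowed on this video
 * device (encoder or decoder) and apply the default values.
 */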
static int hantro_ctrls_setup(struct hantro_dev *vpu,
			      struct hantro_ctx *ctx,
			      int allowed_codecs)
{
	int i, num_ctrls = ARRAY_SIZE(controls);

	v4l2_ctrl_handler_init(&ctx->ctrl_handler, num_ctrls);

	for (i = 0; i < num_ctrls; i++) {
		if (!(allowed_codecs & controls[i].codec))
			continue;

		v4l2_ctrl_new_custom(&ctx->ctrl_handler,
				     &controls[i].cfg, NULL);
		if (ctx->ctrl_handler.error) {
			vpu_err("Adding control (%d) failed %d\n",
				controls[i].cfg.id,
				ctx->ctrl_handler.error);
			v4l2_ctrl_handler_free(&ctx->ctrl_handler);
			return ctx->ctrl_handler.error;
		}
	}
	return v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
}

/*
 * V4L2 file operations.
 */

static int hantro_open(struct file *filp)
{
	struct hantro_dev *vpu = video_drvdata(filp);
	struct video_device *vdev = video_devdata(filp);
	struct hantro_func *func = hantro_vdev_to_func(vdev);
	struct hantro_ctx *ctx;
	int allowed_codecs, ret;

	/*
	 * We do not need any extra locking here, because we operate only
	 * on local data, except for reading a few fields from dev, which
	 * do not change over the device's lifetime (guaranteed by the
	 * reference on the module taken at open()), and V4L2 internal
	 * objects (such as vdev and ctx->fh), which have proper locking
	 * done in the respective helper functions used here.
	 */

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = vpu;
	if (func->id == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
		allowed_codecs = vpu->variant->codec & HANTRO_ENCODERS;
		ctx->is_encoder = true;
	} else if (func->id == MEDIA_ENT_F_PROC_VIDEO_DECODER) {
		allowed_codecs = vpu->variant->codec & HANTRO_DECODERS;
		ctx->is_encoder = false;
	} else {
		ret = -ENODEV;
		goto err_ctx_free;
	}

	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx, queue_init);
	if (IS_ERR(ctx->fh.m2m_ctx)) {
		ret = PTR_ERR(ctx->fh.m2m_ctx);
		goto err_ctx_free;
	}

	v4l2_fh_init(&ctx->fh, vdev);
	filp->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);

	hantro_reset_fmts(ctx);

	ret = hantro_ctrls_setup(vpu, ctx, allowed_codecs);
	if (ret) {
		vpu_err("Failed to set up controls\n");
		goto err_fh_free;
	}
	ctx->fh.ctrl_handler = &ctx->ctrl_handler;

	return 0;

err_fh_free:
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
err_ctx_free:
	kfree(ctx);
	return ret;
}

static int hantro_release(struct file *filp)
{
	struct hantro_ctx *ctx =
		container_of(filp->private_data, struct hantro_ctx, fh);

	/*
	 * No need for extra locking because this was the last reference
	 * to this file.
	 */
	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
	kfree(ctx);

	return 0;
}

static const struct v4l2_file_operations hantro_fops = {
	.owner = THIS_MODULE,
	.open = hantro_open,
	.release = hantro_release,
	.poll = v4l2_m2m_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = v4l2_m2m_fop_mmap,
};

static const struct of_device_id of_hantro_match[] = {
#ifdef CONFIG_VIDEO_HANTRO_ROCKCHIP
	{ .compatible = "rockchip,px30-vpu", .data = &px30_vpu_variant, },
	{ .compatible = "rockchip,rk3036-vpu", .data = &rk3036_vpu_variant, },
	{ .compatible = "rockchip,rk3066-vpu", .data = &rk3066_vpu_variant, },
	{ .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
	{ .compatible = "rockchip,rk3328-vpu", .data = &rk3328_vpu_variant, },
	{ .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
	{ .compatible = "rockchip,rk3568-vpu", .data = &rk3568_vpu_variant, },
#endif
#ifdef CONFIG_VIDEO_HANTRO_IMX8M
	{ .compatible = "nxp,imx8mm-vpu-g1", .data = &imx8mm_vpu_g1_variant, },
	{ .compatible = "nxp,imx8mq-vpu", .data = &imx8mq_vpu_variant, },
	{ .compatible = "nxp,imx8mq-vpu-g1", .data = &imx8mq_vpu_g1_variant },
	{ .compatible = "nxp,imx8mq-vpu-g2", .data = &imx8mq_vpu_g2_variant },
#endif
#ifdef CONFIG_VIDEO_HANTRO_SAMA5D4
	{ .compatible = "microchip,sama5d4-vdec", .data = &sama5d4_vdec_variant, },
#endif
#ifdef CONFIG_VIDEO_HANTRO_SUNXI
	{ .compatible = "allwinner,sun50i-h6-vpu-g2", .data = &sunxi_vpu_variant, },
#endif
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_hantro_match);

static int hantro_register_entity(struct media_device *mdev,
				  struct media_entity *entity,
				  const char *entity_name,
				  struct media_pad *pads, int num_pads,
				  int function, struct video_device *vdev)
{
	char *name;
	int ret;

	entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
	if (function == MEDIA_ENT_F_IO_V4L) {
		entity->info.dev.major = VIDEO_MAJOR;
		entity->info.dev.minor = vdev->minor;
	}

	name = devm_kasprintf(mdev->dev, GFP_KERNEL, "%s-%s", vdev->name,
			      entity_name);
	if (!name)
		return -ENOMEM;

	entity->name = name;
	entity->function = function;

	ret = media_entity_pads_init(entity, num_pads, pads);
	if (ret)
		return ret;

	ret = media_device_register_entity(mdev, entity);
	if (ret)
		return ret;

	return 0;
}

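/*
 * Build the media controller topology for one codec function: a source
 * I/O entity, a processing entity and a sink I/O entity, linked together
 * and exposed to userspace through a V4L video interface node.
 */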
static int hantro_attach_func(struct hantro_dev *vpu,
			      struct hantro_func *func)
{
	struct media_device *mdev = &vpu->mdev;
	struct media_link *link;
	int ret;

	/* Create the three entities with their pads */
	func->source_pad.flags = MEDIA_PAD_FL_SOURCE;
	ret = hantro_register_entity(mdev, &func->vdev.entity, "source",
				     &func->source_pad, 1, MEDIA_ENT_F_IO_V4L,
				     &func->vdev);
	if (ret)
		return ret;

	func->proc_pads[0].flags = MEDIA_PAD_FL_SINK;
	func->proc_pads[1].flags = MEDIA_PAD_FL_SOURCE;
	ret = hantro_register_entity(mdev, &func->proc, "proc",
				     func->proc_pads, 2, func->id,
				     &func->vdev);
	if (ret)
		goto err_rel_entity0;

	func->sink_pad.flags = MEDIA_PAD_FL_SINK;
	ret = hantro_register_entity(mdev, &func->sink, "sink",
				     &func->sink_pad, 1, MEDIA_ENT_F_IO_V4L,
				     &func->vdev);
	if (ret)
		goto err_rel_entity1;

	/* Connect the three entities */
	ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 0,
				    MEDIA_LNK_FL_IMMUTABLE |
				    MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rel_entity2;

	ret = media_create_pad_link(&func->proc, 1, &func->sink, 0,
				    MEDIA_LNK_FL_IMMUTABLE |
				    MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rm_links0;

	/* Create video interface */
	func->intf_devnode = media_devnode_create(mdev, MEDIA_INTF_T_V4L_VIDEO,
						  0, VIDEO_MAJOR,
						  func->vdev.minor);
	if (!func->intf_devnode) {
		ret = -ENOMEM;
		goto err_rm_links1;
	}

	/* Connect the two DMA engines to the interface */
	link = media_create_intf_link(&func->vdev.entity,
				      &func->intf_devnode->intf,
				      MEDIA_LNK_FL_IMMUTABLE |
				      MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}

	link = media_create_intf_link(&func->sink, &func->intf_devnode->intf,
				      MEDIA_LNK_FL_IMMUTABLE |
				      MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}
	return 0;

err_rm_devnode:
	media_devnode_remove(func->intf_devnode);

err_rm_links1:
	media_entity_remove_links(&func->sink);

err_rm_links0:
	media_entity_remove_links(&func->proc);
	media_entity_remove_links(&func->vdev.entity);

err_rel_entity2:
	media_device_unregister_entity(&func->sink);

err_rel_entity1:
	media_device_unregister_entity(&func->proc);

err_rel_entity0:
	media_device_unregister_entity(&func->vdev.entity);
	return ret;
}

static void hantro_detach_func(struct hantro_func *func)
{
	media_devnode_remove(func->intf_devnode);
	media_entity_remove_links(&func->sink);
	media_entity_remove_links(&func->proc);
	media_entity_remove_links(&func->vdev.entity);
	media_device_unregister_entity(&func->sink);
	media_device_unregister_entity(&func->proc);
	media_device_unregister_entity(&func->vdev.entity);
}

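/*
 * Allocate, set up and register the video device for one codec function
 * (encoder or decoder), then attach it to the media controller topology.
 */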
static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)
{
	const struct of_device_id *match;
	struct hantro_func *func;
	struct video_device *vfd;
	int ret;

	match = of_match_node(of_hantro_match, vpu->dev->of_node);
	func = devm_kzalloc(vpu->dev, sizeof(*func), GFP_KERNEL);
	if (!func) {
		v4l2_err(&vpu->v4l2_dev, "Failed to allocate video device\n");
		return -ENOMEM;
	}

	func->id = funcid;

	vfd = &func->vdev;
	vfd->fops = &hantro_fops;
	vfd->release = video_device_release_empty;
	vfd->lock = &vpu->vpu_mutex;
	vfd->v4l2_dev = &vpu->v4l2_dev;
	vfd->vfl_dir = VFL_DIR_M2M;
	vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
	vfd->ioctl_ops = &hantro_ioctl_ops;
	snprintf(vfd->name, sizeof(vfd->name), "%s-%s", match->compatible,
		 funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER ? "enc" : "dec");

	if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
		vpu->encoder = func;
	} else {
		vpu->decoder = func;
		v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
		v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
	}

	video_set_drvdata(vfd, vpu);

	ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
	if (ret) {
		v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
		return ret;
	}

	ret = hantro_attach_func(vpu, func);
	if (ret) {
		v4l2_err(&vpu->v4l2_dev,
			 "Failed to attach functionality to the media device\n");
		goto err_unreg_dev;
	}

	v4l2_info(&vpu->v4l2_dev, "registered %s as /dev/video%d\n", vfd->name,
		  vfd->num);

	return 0;

err_unreg_dev:
	video_unregister_device(vfd);
	return ret;
}

static int hantro_add_enc_func(struct hantro_dev *vpu)
{
	if (!vpu->variant->enc_fmts)
		return 0;

	return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
}

static int hantro_add_dec_func(struct hantro_dev *vpu)
{
	if (!vpu->variant->dec_fmts)
		return 0;

	return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
}

static void hantro_remove_func(struct hantro_dev *vpu,
			       unsigned int funcid)
{
	struct hantro_func *func;

	if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER)
		func = vpu->encoder;
	else
		func = vpu->decoder;

	if (!func)
		return;

	hantro_detach_func(func);
	video_unregister_device(&func->vdev);
}

static void hantro_remove_enc_func(struct hantro_dev *vpu)
{
	hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
}

static void hantro_remove_dec_func(struct hantro_dev *vpu)
{
	hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
}

static const struct media_device_ops hantro_m2m_media_ops = {
	.req_validate = vb2_request_validate,
	.req_queue = v4l2_m2m_request_queue,
};

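/*
 * Probe: look up the SoC variant, acquire clocks, resets, register ranges
 * and IRQs, run the variant init hook, then register the V4L2, mem2mem and
 * media controller devices.
 */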
static int hantro_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct hantro_dev *vpu;
	struct resource *res;
	int num_bases;
	int i, ret;

	vpu = devm_kzalloc(&pdev->dev, sizeof(*vpu), GFP_KERNEL);
	if (!vpu)
		return -ENOMEM;

	vpu->dev = &pdev->dev;
	vpu->pdev = pdev;
	mutex_init(&vpu->vpu_mutex);
	spin_lock_init(&vpu->irqlock);

	match = of_match_node(of_hantro_match, pdev->dev.of_node);
	vpu->variant = match->data;

	/*
	 * Support for nxp,imx8mq-vpu is kept for backwards compatibility
	 * but it's deprecated. Please update your DTS file to use
	 * nxp,imx8mq-vpu-g1 or nxp,imx8mq-vpu-g2 instead.
	 */
	if (of_device_is_compatible(pdev->dev.of_node, "nxp,imx8mq-vpu"))
		dev_warn(&pdev->dev, "%s compatible is deprecated\n",
			 match->compatible);

	INIT_DELAYED_WORK(&vpu->watchdog_work, hantro_watchdog);

	vpu->clocks = devm_kcalloc(&pdev->dev, vpu->variant->num_clocks,
				   sizeof(*vpu->clocks), GFP_KERNEL);
	if (!vpu->clocks)
		return -ENOMEM;

	if (vpu->variant->num_clocks > 1) {
		for (i = 0; i < vpu->variant->num_clocks; i++)
			vpu->clocks[i].id = vpu->variant->clk_names[i];

		ret = devm_clk_bulk_get(&pdev->dev, vpu->variant->num_clocks,
					vpu->clocks);
		if (ret)
			return ret;
	} else {
		/*
		 * If the driver has a single clk, chances are there will be no
		 * actual name in the DT bindings.
		 */
		vpu->clocks[0].clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(vpu->clocks[0].clk))
			return PTR_ERR(vpu->clocks[0].clk);
	}

	vpu->resets = devm_reset_control_array_get(&pdev->dev, false, true);
	if (IS_ERR(vpu->resets))
		return PTR_ERR(vpu->resets);

	num_bases = vpu->variant->num_regs ?: 1;
	vpu->reg_bases = devm_kcalloc(&pdev->dev, num_bases,
				      sizeof(*vpu->reg_bases), GFP_KERNEL);
	if (!vpu->reg_bases)
		return -ENOMEM;

	for (i = 0; i < num_bases; i++) {
		res = vpu->variant->reg_names ?
		      platform_get_resource_byname(vpu->pdev, IORESOURCE_MEM,
						   vpu->variant->reg_names[i]) :
		      platform_get_resource(vpu->pdev, IORESOURCE_MEM, 0);
		vpu->reg_bases[i] = devm_ioremap_resource(vpu->dev, res);
		if (IS_ERR(vpu->reg_bases[i]))
			return PTR_ERR(vpu->reg_bases[i]);
	}
	vpu->enc_base = vpu->reg_bases[0] + vpu->variant->enc_offset;
	vpu->dec_base = vpu->reg_bases[0] + vpu->variant->dec_offset;

	/*
	 * TODO: Eventually allow taking advantage of full 64-bit address space.
	 * Until then we assume the MSB portion of buffers' base addresses is
	 * always 0 due to this masking operation.
	 */
	ret = dma_set_coherent_mask(vpu->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(vpu->dev, "Could not set DMA coherent mask.\n");
		return ret;
	}
	vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));

	for (i = 0; i < vpu->variant->num_irqs; i++) {
		const char *irq_name;
		int irq;

		if (!vpu->variant->irqs[i].handler)
			continue;

		if (vpu->variant->num_irqs > 1) {
			irq_name = vpu->variant->irqs[i].name;
			irq = platform_get_irq_byname(vpu->pdev, irq_name);
		} else {
			/*
			 * If the driver has a single IRQ, chances are there
			 * will be no actual name in the DT bindings.
			 */
			irq_name = "default";
			irq = platform_get_irq(vpu->pdev, 0);
		}
		if (irq <= 0)
			return -ENXIO;

		ret = devm_request_irq(vpu->dev, irq,
				       vpu->variant->irqs[i].handler, 0,
				       dev_name(vpu->dev), vpu);
		if (ret) {
			dev_err(vpu->dev, "Could not request %s IRQ.\n",
				irq_name);
			return ret;
		}
	}

	if (vpu->variant->init) {
		ret = vpu->variant->init(vpu);
		if (ret) {
			dev_err(&pdev->dev, "Failed to init VPU hardware\n");
			return ret;
		}
	}

	pm_runtime_set_autosuspend_delay(vpu->dev, 100);
	pm_runtime_use_autosuspend(vpu->dev);
	pm_runtime_enable(vpu->dev);

	ret = reset_control_deassert(vpu->resets);
	if (ret) {
		dev_err(&pdev->dev, "Failed to deassert resets\n");
		goto err_pm_disable;
	}

	ret = clk_bulk_prepare(vpu->variant->num_clocks, vpu->clocks);
	if (ret) {
		dev_err(&pdev->dev, "Failed to prepare clocks\n");
		goto err_rst_assert;
	}

	ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register v4l2 device\n");
		goto err_clk_unprepare;
	}
	platform_set_drvdata(pdev, vpu);

	vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
	if (IS_ERR(vpu->m2m_dev)) {
		v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n");
		ret = PTR_ERR(vpu->m2m_dev);
		goto err_v4l2_unreg;
	}

	vpu->mdev.dev = vpu->dev;
	strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
	strscpy(vpu->mdev.bus_info, "platform: " DRIVER_NAME,
		sizeof(vpu->mdev.bus_info));
	media_device_init(&vpu->mdev);
	vpu->mdev.ops = &hantro_m2m_media_ops;
	vpu->v4l2_dev.mdev = &vpu->mdev;

	ret = hantro_add_enc_func(vpu);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register encoder\n");
		goto err_m2m_rel;
	}

	ret = hantro_add_dec_func(vpu);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register decoder\n");
		goto err_rm_enc_func;
	}

	ret = media_device_register(&vpu->mdev);
	if (ret) {
		v4l2_err(&vpu->v4l2_dev, "Failed to register mem2mem media device\n");
		goto err_rm_dec_func;
	}

	return 0;

err_rm_dec_func:
	hantro_remove_dec_func(vpu);
err_rm_enc_func:
	hantro_remove_enc_func(vpu);
err_m2m_rel:
	media_device_cleanup(&vpu->mdev);
	v4l2_m2m_release(vpu->m2m_dev);
err_v4l2_unreg:
	v4l2_device_unregister(&vpu->v4l2_dev);
err_clk_unprepare:
	clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
err_rst_assert:
	reset_control_assert(vpu->resets);
err_pm_disable:
	pm_runtime_dont_use_autosuspend(vpu->dev);
	pm_runtime_disable(vpu->dev);
	return ret;
}

static int hantro_remove(struct platform_device *pdev)
{
	struct hantro_dev *vpu = platform_get_drvdata(pdev);

	v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);

	media_device_unregister(&vpu->mdev);
	hantro_remove_dec_func(vpu);
	hantro_remove_enc_func(vpu);
	media_device_cleanup(&vpu->mdev);
	v4l2_m2m_release(vpu->m2m_dev);
	v4l2_device_unregister(&vpu->v4l2_dev);
	clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
	reset_control_assert(vpu->resets);
	pm_runtime_dont_use_autosuspend(vpu->dev);
	pm_runtime_disable(vpu->dev);
	return 0;
}

#ifdef CONFIG_PM
static int hantro_runtime_resume(struct device *dev)
{
	struct hantro_dev *vpu = dev_get_drvdata(dev);

	if (vpu->variant->runtime_resume)
		return vpu->variant->runtime_resume(vpu);

	return 0;
}
#endif

static const struct dev_pm_ops hantro_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(NULL, hantro_runtime_resume, NULL)
};

static struct platform_driver hantro_driver = {
	.probe = hantro_probe,
	.remove = hantro_remove,
	.driver = {
		   .name = DRIVER_NAME,
		   .of_match_table = of_match_ptr(of_hantro_match),
		   .pm = &hantro_pm_ops,
	},
};
module_platform_driver(hantro_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alpha Lin <Alpha.Lin@Rock-Chips.com>");
MODULE_AUTHOR("Tomasz Figa <tfiga@chromium.org>");
MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
MODULE_DESCRIPTION("Hantro VPU codec driver");