1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Hantro G1 post-processor support
4  *
5  * Copyright (C) 2019 Collabora, Ltd.
6  */
7 
8 #include <linux/dma-mapping.h>
9 #include <linux/types.h>
10 
11 #include "hantro.h"
12 #include "hantro_hw.h"
13 #include "hantro_g1_regs.h"
14 #include "hantro_g2_regs.h"
15 
/*
 * Write @val into the G1 post-processor register field @reg_name.
 * Wrapped in do { } while (0) so the expansion is a single statement
 * and remains safe inside unbraced if/else bodies.
 */
#define HANTRO_PP_REG_WRITE(vpu, reg_name, val) \
do { \
	hantro_reg_write(vpu, \
			 &hantro_g1_postproc_regs.reg_name, \
			 val); \
} while (0)
22 
/*
 * Like HANTRO_PP_REG_WRITE() but uses hantro_reg_write_s() (the
 * variant used for the pipeline-enable bit). do { } while (0) keeps
 * the expansion a single statement, safe in unbraced if/else bodies.
 */
#define HANTRO_PP_REG_WRITE_S(vpu, reg_name, val) \
do { \
	hantro_reg_write_s(vpu, \
			   &hantro_g1_postproc_regs.reg_name, \
			   val); \
} while (0)
29 
/* Post-processor input pixel format codes (hardware encoding). */
#define VPU_PP_IN_YUYV			0x0
#define VPU_PP_IN_NV12			0x1
#define VPU_PP_IN_YUV420		0x2
/* NOTE(review): "YUV240" looks like a typo for YUV420 (tiled); name kept as-is. */
#define VPU_PP_IN_YUV240_TILED		0x5
/* Post-processor output pixel format codes (hardware encoding). */
#define VPU_PP_OUT_RGB			0x0
#define VPU_PP_OUT_YUYV			0x3
36 
/*
 * G1 post-processor register field map: each entry is
 * { register offset, bit shift, bit mask } for one hardware field,
 * consumed by hantro_reg_write()/hantro_reg_write_s() via the
 * HANTRO_PP_REG_WRITE*() macros.
 */
static const struct hantro_postproc_regs hantro_g1_postproc_regs = {
	.pipeline_en = {G1_REG_PP_INTERRUPT, 1, 0x1},
	.max_burst = {G1_REG_PP_DEV_CONFIG, 0, 0x1f},
	.clk_gate = {G1_REG_PP_DEV_CONFIG, 1, 0x1},
	.out_swap32 = {G1_REG_PP_DEV_CONFIG, 5, 0x1},
	.out_endian = {G1_REG_PP_DEV_CONFIG, 6, 0x1},
	.out_luma_base = {G1_REG_PP_OUT_LUMA_BASE, 0, 0xffffffff},
	.input_width = {G1_REG_PP_INPUT_SIZE, 0, 0x1ff},
	.input_height = {G1_REG_PP_INPUT_SIZE, 9, 0x1ff},
	.output_width = {G1_REG_PP_CONTROL, 4, 0x7ff},
	.output_height = {G1_REG_PP_CONTROL, 15, 0x7ff},
	.input_fmt = {G1_REG_PP_CONTROL, 29, 0x7},
	.output_fmt = {G1_REG_PP_CONTROL, 26, 0x7},
	.orig_width = {G1_REG_PP_MASK1_ORIG_WIDTH, 23, 0x1ff},
	.display_width = {G1_REG_PP_DISPLAY_WIDTH, 0, 0xfff},
};
53 
hantro_needs_postproc(const struct hantro_ctx * ctx,const struct hantro_fmt * fmt)54 bool hantro_needs_postproc(const struct hantro_ctx *ctx,
55 			   const struct hantro_fmt *fmt)
56 {
57 	if (ctx->is_encoder)
58 		return false;
59 	return fmt->postprocessed;
60 }
61 
/*
 * Program and enable the G1 post-processor so it converts the decoder
 * core's NV12 output into the capture (dst) format on the fly, writing
 * straight into the next capture buffer.
 */
static void hantro_postproc_g1_enable(struct hantro_ctx *ctx)
{
	struct hantro_dev *vpu = ctx->dev;
	struct vb2_v4l2_buffer *dst_buf;
	u32 src_pp_fmt, dst_pp_fmt;
	dma_addr_t dst_dma;

	/* Turn on pipeline mode. Must be done first. */
	HANTRO_PP_REG_WRITE_S(vpu, pipeline_en, 0x1);

	/* The post-processor input is always the decoder's NV12 output. */
	src_pp_fmt = VPU_PP_IN_NV12;

	switch (ctx->vpu_dst_fmt->fourcc) {
	case V4L2_PIX_FMT_YUYV:
		dst_pp_fmt = VPU_PP_OUT_YUYV;
		break;
	default:
		/* Format negotiation should never let us get here. */
		WARN(1, "output format %d not supported by the post-processor, this wasn't expected.",
		     ctx->vpu_dst_fmt->fourcc);
		dst_pp_fmt = 0;
		break;
	}

	/* Output goes directly into the upcoming capture buffer. */
	dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
	dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);

	HANTRO_PP_REG_WRITE(vpu, clk_gate, 0x1);
	HANTRO_PP_REG_WRITE(vpu, out_endian, 0x1);
	HANTRO_PP_REG_WRITE(vpu, out_swap32, 0x1);
	HANTRO_PP_REG_WRITE(vpu, max_burst, 16);
	HANTRO_PP_REG_WRITE(vpu, out_luma_base, dst_dma);
	/* Input dimensions are programmed in macroblock units ... */
	HANTRO_PP_REG_WRITE(vpu, input_width, MB_WIDTH(ctx->dst_fmt.width));
	HANTRO_PP_REG_WRITE(vpu, input_height, MB_HEIGHT(ctx->dst_fmt.height));
	HANTRO_PP_REG_WRITE(vpu, input_fmt, src_pp_fmt);
	HANTRO_PP_REG_WRITE(vpu, output_fmt, dst_pp_fmt);
	/* ... while output dimensions are programmed in pixels. */
	HANTRO_PP_REG_WRITE(vpu, output_width, ctx->dst_fmt.width);
	HANTRO_PP_REG_WRITE(vpu, output_height, ctx->dst_fmt.height);
	HANTRO_PP_REG_WRITE(vpu, orig_width, MB_WIDTH(ctx->dst_fmt.width));
	HANTRO_PP_REG_WRITE(vpu, display_width, ctx->dst_fmt.width);
}
102 
down_scale_factor(struct hantro_ctx * ctx)103 static int down_scale_factor(struct hantro_ctx *ctx)
104 {
105 	if (ctx->src_fmt.width == ctx->dst_fmt.width)
106 		return 0;
107 
108 	return DIV_ROUND_CLOSEST(ctx->src_fmt.width, ctx->dst_fmt.width);
109 }
110 
/*
 * Program and enable the G2 post-processor: raster-scan output into the
 * current capture buffer, with optional down-scaling.
 */
static void hantro_postproc_g2_enable(struct hantro_ctx *ctx)
{
	struct hantro_dev *vpu = ctx->dev;
	struct vb2_v4l2_buffer *dst_buf;
	/* Chroma plane immediately follows the luma plane in the buffer. */
	size_t chroma_offset = ctx->dst_fmt.width * ctx->dst_fmt.height;
	int down_scale = down_scale_factor(ctx);
	dma_addr_t dst_dma;

	dst_buf = hantro_get_dst_buf(ctx);
	dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);

	if (down_scale) {
		hantro_reg_write(vpu, &g2_down_scale_e, 1);
		/*
		 * NOTE(review): the factor is encoded as (down_scale >> 2)
		 * and the chroma offset is shifted by down_scale — this
		 * assumes down_scale is one of the hardware-supported
		 * power-of-two ratios; confirm against down_scale_factor()
		 * and the enum_framesizes constraints.
		 */
		hantro_reg_write(vpu, &g2_down_scale_y, down_scale >> 2);
		hantro_reg_write(vpu, &g2_down_scale_x, down_scale >> 2);
		hantro_write_addr(vpu, G2_DS_DST, dst_dma);
		hantro_write_addr(vpu, G2_DS_DST_CHR, dst_dma + (chroma_offset >> down_scale));
	} else {
		hantro_write_addr(vpu, G2_RS_OUT_LUMA_ADDR, dst_dma);
		hantro_write_addr(vpu, G2_RS_OUT_CHROMA_ADDR, dst_dma + chroma_offset);
	}
	/* Raster-scan output must be enabled in both paths. */
	hantro_reg_write(vpu, &g2_out_rs_e, 1);
}
134 
/*
 * Enumerate the frame sizes the G2 post-processor can output for the
 * current source format.
 */
static int hantro_postproc_g2_enum_framesizes(struct hantro_ctx *ctx,
					      struct v4l2_frmsizeenum *fsize)
{
	/*
	 * The G2 scaler can down-scale by a factor of 1, 2, 4 or 8, so
	 * fsize->index is used as the power-of-two divisor exponent
	 * (shift amount), giving four valid entries (0..3).
	 */
	if (fsize->index > 3)
		return -EINVAL;

	/* Source format must be negotiated before sizes can be derived. */
	if (!ctx->src_fmt.width || !ctx->src_fmt.height)
		return -EINVAL;

	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
	fsize->discrete.width = ctx->src_fmt.width >> fsize->index;
	fsize->discrete.height = ctx->src_fmt.height >> fsize->index;

	return 0;
}
154 
hantro_postproc_free(struct hantro_ctx * ctx)155 void hantro_postproc_free(struct hantro_ctx *ctx)
156 {
157 	struct hantro_dev *vpu = ctx->dev;
158 	unsigned int i;
159 
160 	for (i = 0; i < VB2_MAX_FRAME; ++i) {
161 		struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
162 
163 		if (priv->cpu) {
164 			dma_free_attrs(vpu->dev, priv->size, priv->cpu,
165 				       priv->dma, priv->attrs);
166 			priv->cpu = NULL;
167 		}
168 	}
169 }
170 
hantro_postproc_alloc(struct hantro_ctx * ctx)171 int hantro_postproc_alloc(struct hantro_ctx *ctx)
172 {
173 	struct hantro_dev *vpu = ctx->dev;
174 	struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
175 	struct vb2_queue *cap_queue = &m2m_ctx->cap_q_ctx.q;
176 	unsigned int num_buffers = cap_queue->num_buffers;
177 	unsigned int i, buf_size;
178 
179 	buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage;
180 	if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE)
181 		buf_size += hantro_h264_mv_size(ctx->dst_fmt.width,
182 						ctx->dst_fmt.height);
183 	else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_VP9_FRAME)
184 		buf_size += hantro_vp9_mv_size(ctx->dst_fmt.width,
185 					       ctx->dst_fmt.height);
186 	else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE)
187 		buf_size += hantro_hevc_mv_size(ctx->dst_fmt.width,
188 						ctx->dst_fmt.height);
189 
190 	for (i = 0; i < num_buffers; ++i) {
191 		struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
192 
193 		/*
194 		 * The buffers on this queue are meant as intermediate
195 		 * buffers for the decoder, so no mapping is needed.
196 		 */
197 		priv->attrs = DMA_ATTR_NO_KERNEL_MAPPING;
198 		priv->cpu = dma_alloc_attrs(vpu->dev, buf_size, &priv->dma,
199 					    GFP_KERNEL, priv->attrs);
200 		if (!priv->cpu)
201 			return -ENOMEM;
202 		priv->size = buf_size;
203 	}
204 	return 0;
205 }
206 
hantro_postproc_g1_disable(struct hantro_ctx * ctx)207 static void hantro_postproc_g1_disable(struct hantro_ctx *ctx)
208 {
209 	struct hantro_dev *vpu = ctx->dev;
210 
211 	HANTRO_PP_REG_WRITE_S(vpu, pipeline_en, 0x0);
212 }
213 
/* Stop the G2 post-processor by clearing the raster-scan output enable. */
static void hantro_postproc_g2_disable(struct hantro_ctx *ctx)
{
	struct hantro_dev *vpu = ctx->dev;

	hantro_reg_write(vpu, &g2_out_rs_e, 0);
}
220 
hantro_postproc_disable(struct hantro_ctx * ctx)221 void hantro_postproc_disable(struct hantro_ctx *ctx)
222 {
223 	struct hantro_dev *vpu = ctx->dev;
224 
225 	if (vpu->variant->postproc_ops && vpu->variant->postproc_ops->disable)
226 		vpu->variant->postproc_ops->disable(ctx);
227 }
228 
hantro_postproc_enable(struct hantro_ctx * ctx)229 void hantro_postproc_enable(struct hantro_ctx *ctx)
230 {
231 	struct hantro_dev *vpu = ctx->dev;
232 
233 	if (vpu->variant->postproc_ops && vpu->variant->postproc_ops->enable)
234 		vpu->variant->postproc_ops->enable(ctx);
235 }
236 
hanto_postproc_enum_framesizes(struct hantro_ctx * ctx,struct v4l2_frmsizeenum * fsize)237 int hanto_postproc_enum_framesizes(struct hantro_ctx *ctx,
238 				   struct v4l2_frmsizeenum *fsize)
239 {
240 	struct hantro_dev *vpu = ctx->dev;
241 
242 	if (vpu->variant->postproc_ops && vpu->variant->postproc_ops->enum_framesizes)
243 		return vpu->variant->postproc_ops->enum_framesizes(ctx, fsize);
244 
245 	return -EINVAL;
246 }
247 
/* Post-processing hooks for variants built around the G1 core. */
const struct hantro_postproc_ops hantro_g1_postproc_ops = {
	.enable = hantro_postproc_g1_enable,
	.disable = hantro_postproc_g1_disable,
};
252 
/* Post-processing hooks for variants built around the G2 core. */
const struct hantro_postproc_ops hantro_g2_postproc_ops = {
	.enable = hantro_postproc_g2_enable,
	.disable = hantro_postproc_g2_disable,
	.enum_framesizes = hantro_postproc_g2_enum_framesizes,
};
258