// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "vpu.h"
#include "vpu_core.h"
#include "vpu_rpc.h"
#include "vpu_helpers.h"

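/*
 * Look up @x in @array and return its index. Note that 0 is returned both
 * for a match at index 0 and when @x is not present, so a miss falls back
 * to the first array entry.
 */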
int vpu_helper_find_in_array_u8(const u8 *array, u32 size, u32 x)
{
	int i;

	for (i = 0; i < size; i++) {
		if (array[i] == x)
			return i;
	}

	return 0;
}

bool vpu_helper_check_type(struct vpu_inst *inst, u32 type)
{
	const struct vpu_format *pfmt;

	for (pfmt = inst->formats; pfmt->pixfmt; pfmt++) {
		if (!vpu_iface_check_format(inst, pfmt->pixfmt))
			continue;
		if (pfmt->type == type)
			return true;
	}

	return false;
}

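/*
 * Return the format table entry matching @pixelfmt (and @type, unless @type
 * is 0) for this instance, or NULL when the format is not supported by the
 * firmware interface.
 */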
const struct vpu_format *vpu_helper_find_format(struct vpu_inst *inst, u32 type, u32 pixelfmt)
{
	const struct vpu_format *pfmt;

	if (!inst || !inst->formats)
		return NULL;

	if (!vpu_iface_check_format(inst, pixelfmt))
		return NULL;

	for (pfmt = inst->formats; pfmt->pixfmt; pfmt++) {
		if (pfmt->pixfmt == pixelfmt && (!type || type == pfmt->type))
			return pfmt;
	}

	return NULL;
}

const struct vpu_format *vpu_helper_enum_format(struct vpu_inst *inst, u32 type, int index)
{
	const struct vpu_format *pfmt;
	int i = 0;

	if (!inst || !inst->formats)
		return NULL;

	for (pfmt = inst->formats; pfmt->pixfmt; pfmt++) {
		if (!vpu_iface_check_format(inst, pfmt->pixfmt))
			continue;

		if (pfmt->type == type) {
			if (index == i)
				return pfmt;
			i++;
		}
	}

	return NULL;
}

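/*
 * Constrain a requested frame width to the core's resource limits: clamp to
 * [min_width, max_width] when a maximum is set, then round up to the width
 * step. vpu_helper_valid_frame_height() below does the same for the height.
 */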
u32 vpu_helper_valid_frame_width(struct vpu_inst *inst, u32 width)
{
	const struct vpu_core_resources *res;

	if (!inst)
		return width;

	res = vpu_get_resource(inst);
	if (!res)
		return width;
	if (res->max_width)
		width = clamp(width, res->min_width, res->max_width);
	if (res->step_width)
		width = ALIGN(width, res->step_width);

	return width;
}

u32 vpu_helper_valid_frame_height(struct vpu_inst *inst, u32 height)
{
	const struct vpu_core_resources *res;

	if (!inst)
		return height;

	res = vpu_get_resource(inst);
	if (!res)
		return height;
	if (res->max_height)
		height = clamp(height, res->min_height, res->max_height);
	if (res->step_height)
		height = ALIGN(height, res->step_height);

	return height;
}

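/*
 * Plane size for non-contiguous NV12 (V4L2_PIX_FMT_NV12M): the luma plane is
 * bytesperline * height and the chroma plane is half of that. The line length
 * is @width aligned to @stride; if @pbl is given it acts both as a minimum
 * and as a return slot for the bytesperline actually used.
 */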
static u32 get_nv12_plane_size(u32 width, u32 height, int plane_no,
			       u32 stride, u32 interlaced, u32 *pbl)
{
	u32 bytesperline;
	u32 size = 0;

	bytesperline = ALIGN(width, stride);
	if (pbl)
		bytesperline = max(bytesperline, *pbl);
	height = ALIGN(height, 2);
	if (plane_no == 0)
		size = bytesperline * height;
	else if (plane_no == 1)
		size = bytesperline * height >> 1;
	if (pbl)
		*pbl = bytesperline;

	return size;
}

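/*
 * Plane size for the 8x128 tiled formats. Lines are padded to the 8-byte
 * tile width (1 << ws) and to @stride, and the height to the 128-line tile
 * height (1 << hs, doubled for interlaced content); the 10-bit variant packs
 * 10 bits per sample before the alignment is applied. The chroma plane is
 * half the size of a luma plane padded to twice the tile height.
 */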
static u32 get_tiled_8l128_plane_size(u32 fmt, u32 width, u32 height, int plane_no,
				      u32 stride, u32 interlaced, u32 *pbl)
{
	u32 ws = 3;
	u32 hs = 7;
	u32 bitdepth = 8;
	u32 bytesperline;
	u32 size = 0;

	if (interlaced)
		hs++;
	if (fmt == V4L2_PIX_FMT_NV12M_10BE_8L128)
		bitdepth = 10;
	bytesperline = DIV_ROUND_UP(width * bitdepth, BITS_PER_BYTE);
	bytesperline = ALIGN(bytesperline, 1 << ws);
	bytesperline = ALIGN(bytesperline, stride);
	if (pbl)
		bytesperline = max(bytesperline, *pbl);
	height = ALIGN(height, 1 << hs);
	if (plane_no == 0)
		size = bytesperline * height;
	else if (plane_no == 1)
		size = (bytesperline * ALIGN(height, 1 << (hs + 1))) >> 1;
	if (pbl)
		*pbl = bytesperline;

	return size;
}

static u32 get_default_plane_size(u32 width, u32 height, int plane_no,
				  u32 stride, u32 interlaced, u32 *pbl)
{
	u32 bytesperline;
	u32 size = 0;

	bytesperline = ALIGN(width, stride);
	if (pbl)
		bytesperline = max(bytesperline, *pbl);
	if (plane_no == 0)
		size = bytesperline * height;
	if (pbl)
		*pbl = bytesperline;

	return size;
}

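/*
 * Dispatch to the per-format plane size helpers above. As a rough worked
 * example (the 256-byte stride is only an assumption for illustration):
 * NV12M at 1920x1080 with stride 256 gives
 * bytesperline = ALIGN(1920, 256) = 2048, so plane 0 is
 * 2048 * 1080 = 2211840 bytes and plane 1 is half of that, 1105920 bytes.
 */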
u32 vpu_helper_get_plane_size(u32 fmt, u32 w, u32 h, int plane_no,
			      u32 stride, u32 interlaced, u32 *pbl)
{
	switch (fmt) {
	case V4L2_PIX_FMT_NV12M:
		return get_nv12_plane_size(w, h, plane_no, stride, interlaced, pbl);
	case V4L2_PIX_FMT_NV12M_8L128:
	case V4L2_PIX_FMT_NV12M_10BE_8L128:
		return get_tiled_8l128_plane_size(fmt, w, h, plane_no, stride, interlaced, pbl);
	default:
		return get_default_plane_size(w, h, plane_no, stride, interlaced, pbl);
	}
}

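/*
 * The stream buffer is used as a ring buffer addressed by device (physical)
 * addresses: @rptr is a cursor between stream_buffer->phys and phys + length.
 * Reads that run past the end of the buffer wrap around to the start, and
 * the cursor is advanced via vpu_helper_step_walk().
 */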
int vpu_helper_copy_from_stream_buffer(struct vpu_buffer *stream_buffer,
				       u32 *rptr, u32 size, void *dst)
{
	u32 offset;
	u32 start;
	u32 end;
	void *virt;

	if (!stream_buffer || !rptr || !dst)
		return -EINVAL;

	if (!size)
		return 0;

	offset = *rptr;
	start = stream_buffer->phys;
	end = start + stream_buffer->length;
	virt = stream_buffer->virt;

	if (offset < start || offset > end)
		return -EINVAL;

	if (offset + size <= end) {
		memcpy(dst, virt + (offset - start), size);
	} else {
		memcpy(dst, virt + (offset - start), end - offset);
		memcpy(dst + end - offset, virt, size + offset - end);
	}

	*rptr = vpu_helper_step_walk(stream_buffer, offset, size);

	return 0;
}

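/*
 * Mirror of vpu_helper_copy_from_stream_buffer(): copy @size bytes from @src
 * into the ring buffer at the write cursor @wptr, wrapping at the end of the
 * buffer.
 */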
int vpu_helper_copy_to_stream_buffer(struct vpu_buffer *stream_buffer,
				     u32 *wptr, u32 size, void *src)
{
	u32 offset;
	u32 start;
	u32 end;
	void *virt;

	if (!stream_buffer || !wptr || !src)
		return -EINVAL;

	if (!size)
		return 0;

	offset = *wptr;
	start = stream_buffer->phys;
	end = start + stream_buffer->length;
	virt = stream_buffer->virt;
	if (offset < start || offset > end)
		return -EINVAL;

	if (offset + size <= end) {
		memcpy(virt + (offset - start), src, size);
	} else {
		memcpy(virt + (offset - start), src, end - offset);
		memcpy(virt, src + end - offset, size + offset - end);
	}

	*wptr = vpu_helper_step_walk(stream_buffer, offset, size);

	return 0;
}

int vpu_helper_memset_stream_buffer(struct vpu_buffer *stream_buffer,
				    u32 *wptr, u8 val, u32 size)
{
	u32 offset;
	u32 start;
	u32 end;
	void *virt;

	if (!stream_buffer || !wptr)
		return -EINVAL;

	if (!size)
		return 0;

	offset = *wptr;
	start = stream_buffer->phys;
	end = start + stream_buffer->length;
	virt = stream_buffer->virt;
	if (offset < start || offset > end)
		return -EINVAL;

	if (offset + size <= end) {
		memset(virt + (offset - start), val, size);
	} else {
		memset(virt + (offset - start), val, end - offset);
		memset(virt, val, size + offset - end);
	}

	offset += size;
	if (offset >= end)
		offset -= stream_buffer->length;

	*wptr = offset;

	return 0;
}

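/*
 * Free space between the firmware read pointer and the driver write pointer
 * of the stream buffer. When the two pointers are equal the buffer is
 * considered empty, so the whole buffer size is reported as free.
 */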
u32 vpu_helper_get_free_space(struct vpu_inst *inst)
{
	struct vpu_rpc_buffer_desc desc;

	if (vpu_iface_get_stream_buffer_desc(inst, &desc))
		return 0;

	if (desc.rptr > desc.wptr)
		return desc.rptr - desc.wptr;
	else if (desc.rptr < desc.wptr)
		return (desc.end - desc.start + desc.rptr - desc.wptr);
	else
		return desc.end - desc.start;
}

u32 vpu_helper_get_used_space(struct vpu_inst *inst)
{
	struct vpu_rpc_buffer_desc desc;

	if (vpu_iface_get_stream_buffer_desc(inst, &desc))
		return 0;

	if (desc.wptr > desc.rptr)
		return desc.wptr - desc.rptr;
	else if (desc.wptr < desc.rptr)
		return (desc.end - desc.start + desc.wptr - desc.rptr);
	else
		return 0;
}

int vpu_helper_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
	struct vpu_inst *inst = ctrl_to_inst(ctrl);

	switch (ctrl->id) {
	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
		ctrl->val = inst->min_buffer_cap;
		break;
	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
		ctrl->val = inst->min_buffer_out;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

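/*
 * Scan @bytesused bytes of the ring buffer starting at @offset for an H.264
 * Annex B start code (0x00000001) and return the offset of its first byte
 * relative to @offset, a negative error if none is found, or 0 for formats
 * that carry no start code.
 */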
int vpu_helper_find_startcode(struct vpu_buffer *stream_buffer,
			      u32 pixelformat, u32 offset, u32 bytesused)
{
	u32 start_code;
	int start_code_size;
	u32 val = 0;
	int i;
	int ret = -EINVAL;

	if (!stream_buffer || !stream_buffer->virt)
		return -EINVAL;

	switch (pixelformat) {
	case V4L2_PIX_FMT_H264:
		start_code_size = 4;
		start_code = 0x00000001;
		break;
	default:
		return 0;
	}

	for (i = 0; i < bytesused; i++) {
		val = (val << 8) | vpu_helper_read_byte(stream_buffer, offset + i);
		if (i < start_code_size - 1)
			continue;
		if (val == start_code) {
			ret = i + 1 - start_code_size;
			break;
		}
	}

	return ret;
}

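/*
 * Translate a value from one ID space to its paired value using a table of
 * struct vpu_pair entries (src -> dst); vpu_find_src_by_dst() below performs
 * the reverse lookup.
 */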
int vpu_find_dst_by_src(struct vpu_pair *pairs, u32 cnt, u32 src)
{
	u32 i;

	if (!pairs || !cnt)
		return -EINVAL;

	for (i = 0; i < cnt; i++) {
		if (pairs[i].src == src)
			return pairs[i].dst;
	}

	return -EINVAL;
}

int vpu_find_src_by_dst(struct vpu_pair *pairs, u32 cnt, u32 dst)
{
	u32 i;

	if (!pairs || !cnt)
		return -EINVAL;

	for (i = 0; i < cnt; i++) {
		if (pairs[i].dst == dst)
			return pairs[i].src;
	}

	return -EINVAL;
}
