// SPDX-License-Identifier: GPL-2.0-only
/*
 * vivid-kthread-cap.c - video/vbi capture thread support functions.
 *
 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/font.h>
#include <linux/mutex.h>
#include <linux/videodev2.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/random.h>
#include <linux/v4l2-dv-timings.h>
#include <linux/jiffies.h>
#include <asm/div64.h>
#include <media/videobuf2-vmalloc.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-rect.h>

#include "vivid-core.h"
#include "vivid-vid-common.h"
#include "vivid-vid-cap.h"
#include "vivid-vid-out.h"
#include "vivid-radio-common.h"
#include "vivid-radio-rx.h"
#include "vivid-radio-tx.h"
#include "vivid-sdr-cap.h"
#include "vivid-vbi-cap.h"
#include "vivid-vbi-out.h"
#include "vivid-osd.h"
#include "vivid-ctrls.h"
#include "vivid-kthread-cap.h"
#include "vivid-meta-cap.h"

static inline v4l2_std_id vivid_get_std_cap(const struct vivid_dev *dev)
{
	if (vivid_is_sdtv_cap(dev))
		return dev->std_cap[dev->input];
	return 0;
}

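/*
 * Decide whether the capture pixel at (win_x, win_y) keeps the looped video
 * value or takes the output-overlay (framebuffer) value, based on the output
 * bitmap, the clip list and the chroma-key/alpha framebuffer flags.
 */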
static void copy_pix(struct vivid_dev *dev, int win_y, int win_x,
		     u16 *cap, const u16 *osd)
{
	u16 out;
	int left = dev->overlay_out_left;
	int top = dev->overlay_out_top;
	int fb_x = win_x + left;
	int fb_y = win_y + top;
	int i;

	out = *cap;
	*cap = *osd;
	if (dev->bitmap_out) {
		const u8 *p = dev->bitmap_out;
		unsigned stride = (dev->compose_out.width + 7) / 8;

		win_x -= dev->compose_out.left;
		win_y -= dev->compose_out.top;
		if (!(p[stride * win_y + win_x / 8] & (1 << (win_x & 7))))
			return;
	}

	for (i = 0; i < dev->clipcount_out; i++) {
		struct v4l2_rect *r = &dev->clips_out[i].c;

		if (fb_y >= r->top && fb_y < r->top + r->height &&
		    fb_x >= r->left && fb_x < r->left + r->width)
			return;
	}
	if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_CHROMAKEY) &&
	    *osd != dev->chromakey_out)
		return;
	if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY) &&
	    out == dev->chromakey_out)
		return;
	if (dev->fmt_cap->alpha_mask) {
		if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_GLOBAL_ALPHA) &&
		    dev->global_alpha_out)
			return;
		if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) &&
		    *cap & dev->fmt_cap->alpha_mask)
			return;
		if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_LOCAL_INV_ALPHA) &&
		    !(*cap & dev->fmt_cap->alpha_mask))
			return;
	}
	*cap = out;
}

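/*
 * Blend one line of the output overlay into the capture line by calling
 * copy_pix() for each pixel.
 */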
static void blend_line(struct vivid_dev *dev, unsigned y_offset, unsigned x_offset,
		       u8 *vcapbuf, const u8 *vosdbuf,
		       unsigned width, unsigned pixsize)
{
	unsigned x;

	for (x = 0; x < width; x++, vcapbuf += pixsize, vosdbuf += pixsize) {
		copy_pix(dev, y_offset, x_offset + x,
			 (u16 *)vcapbuf, (const u16 *)vosdbuf);
	}
}

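/*
 * Scale a line from srcw to dstw pixels. The line is processed in pairs of
 * pixels (twopixsize bytes at a time) so packed YUV formats are not split
 * in the middle of a pixel pair.
 */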
static void scale_line(const u8 *src, u8 *dst, unsigned srcw, unsigned dstw, unsigned twopixsize)
{
	/* Coarse scaling with Bresenham */
	unsigned int_part;
	unsigned fract_part;
	unsigned src_x = 0;
	unsigned error = 0;
	unsigned x;

	/*
	 * We always combine two pixels to prevent color bleed in the packed
	 * yuv case.
	 */
	srcw /= 2;
	dstw /= 2;
	int_part = srcw / dstw;
	fract_part = srcw % dstw;
	for (x = 0; x < dstw; x++, dst += twopixsize) {
		memcpy(dst, src + src_x * twopixsize, twopixsize);
		src_x += int_part;
		error += fract_part;
		if (error >= dstw) {
			error -= dstw;
			src_x++;
		}
	}
}

/*
 * Precalculate the rectangles needed to perform video looping:
 *
 * The nominal pipeline is that the video output buffer is cropped by
 * crop_out, scaled to compose_out, overlaid with the output overlay,
 * cropped on the capture side by crop_cap and scaled again to the video
 * capture buffer using compose_cap.
 *
 * To keep things efficient we calculate the intersection of compose_out
 * and crop_cap (since that's the only part of the video that will
 * actually end up in the capture buffer), determine which part of the
 * video output buffer that is and which part of the video capture buffer
 * so we can scale the video straight from the output buffer to the capture
 * buffer without any intermediate steps.
 *
 * If we need to deal with an output overlay, then there is no choice and
 * that intermediate step still has to be taken. For the output overlay
 * support we calculate the intersection of the framebuffer and the overlay
 * window (which may be partially or wholly outside of the framebuffer
 * itself) and the intersection of that with loop_vid_copy (i.e. the part of
 * the actual looped video that will be overlaid). The result is calculated
 * both in framebuffer coordinates (loop_fb_copy) and compose_out coordinates
 * (loop_vid_overlay). Finally calculate the part of the capture buffer that
 * will receive that overlaid video.
 */
static void vivid_precalc_copy_rects(struct vivid_dev *dev)
{
	/* Framebuffer rectangle */
	struct v4l2_rect r_fb = {
		0, 0, dev->display_width, dev->display_height
	};
	/* Overlay window rectangle in framebuffer coordinates */
	struct v4l2_rect r_overlay = {
		dev->overlay_out_left, dev->overlay_out_top,
		dev->compose_out.width, dev->compose_out.height
	};

	v4l2_rect_intersect(&dev->loop_vid_copy, &dev->crop_cap, &dev->compose_out);

	dev->loop_vid_out = dev->loop_vid_copy;
	v4l2_rect_scale(&dev->loop_vid_out, &dev->compose_out, &dev->crop_out);
	dev->loop_vid_out.left += dev->crop_out.left;
	dev->loop_vid_out.top += dev->crop_out.top;

	dev->loop_vid_cap = dev->loop_vid_copy;
	v4l2_rect_scale(&dev->loop_vid_cap, &dev->crop_cap, &dev->compose_cap);

	dprintk(dev, 1,
		"loop_vid_copy: %dx%d@%dx%d loop_vid_out: %dx%d@%dx%d loop_vid_cap: %dx%d@%dx%d\n",
		dev->loop_vid_copy.width, dev->loop_vid_copy.height,
		dev->loop_vid_copy.left, dev->loop_vid_copy.top,
		dev->loop_vid_out.width, dev->loop_vid_out.height,
		dev->loop_vid_out.left, dev->loop_vid_out.top,
		dev->loop_vid_cap.width, dev->loop_vid_cap.height,
		dev->loop_vid_cap.left, dev->loop_vid_cap.top);

	v4l2_rect_intersect(&r_overlay, &r_fb, &r_overlay);

	/* shift r_overlay to the same origin as compose_out */
	r_overlay.left += dev->compose_out.left - dev->overlay_out_left;
	r_overlay.top += dev->compose_out.top - dev->overlay_out_top;

	v4l2_rect_intersect(&dev->loop_vid_overlay, &r_overlay, &dev->loop_vid_copy);
	dev->loop_fb_copy = dev->loop_vid_overlay;

	/* shift dev->loop_fb_copy back again to the fb origin */
	dev->loop_fb_copy.left -= dev->compose_out.left - dev->overlay_out_left;
	dev->loop_fb_copy.top -= dev->compose_out.top - dev->overlay_out_top;

	dev->loop_vid_overlay_cap = dev->loop_vid_overlay;
	v4l2_rect_scale(&dev->loop_vid_overlay_cap, &dev->crop_cap, &dev->compose_cap);

	dprintk(dev, 1,
		"loop_fb_copy: %dx%d@%dx%d loop_vid_overlay: %dx%d@%dx%d loop_vid_overlay_cap: %dx%d@%dx%d\n",
		dev->loop_fb_copy.width, dev->loop_fb_copy.height,
		dev->loop_fb_copy.left, dev->loop_fb_copy.top,
		dev->loop_vid_overlay.width, dev->loop_vid_overlay.height,
		dev->loop_vid_overlay.left, dev->loop_vid_overlay.top,
		dev->loop_vid_overlay_cap.width, dev->loop_vid_overlay_cap.height,
		dev->loop_vid_overlay_cap.left, dev->loop_vid_overlay_cap.top);
}

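/*
 * Return the virtual address of plane p of a buffer. If the format uses
 * fewer buffers than planes, then the remaining planes are stored in
 * buffer 0 and the address is derived from the per-plane bytesperline
 * and vertical downsampling values.
 */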
static void *plane_vaddr(struct tpg_data *tpg, struct vivid_buffer *buf,
			 unsigned p, unsigned bpl[TPG_MAX_PLANES], unsigned h)
{
	unsigned i;
	void *vbuf;

	if (p == 0 || tpg_g_buffers(tpg) > 1)
		return vb2_plane_vaddr(&buf->vb.vb2_buf, p);
	vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
	for (i = 0; i < p; i++)
		vbuf += bpl[i] * h / tpg->vdownsampling[i];
	return vbuf;
}

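/*
 * Copy plane p of the current video output buffer into the capture buffer
 * vcapbuf, cropping and scaling according to the precalculated loop
 * rectangles and blending in the output overlay where needed. Returns
 * -ENODATA if there is no output buffer to loop from.
 */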
static noinline_for_stack int vivid_copy_buffer(struct vivid_dev *dev, unsigned p,
		u8 *vcapbuf, struct vivid_buffer *vid_cap_buf)
{
	bool blank = dev->must_blank[vid_cap_buf->vb.vb2_buf.index];
	struct tpg_data *tpg = &dev->tpg;
	struct vivid_buffer *vid_out_buf = NULL;
	unsigned vdiv = dev->fmt_out->vdownsampling[p];
	unsigned twopixsize = tpg_g_twopixelsize(tpg, p);
	unsigned img_width = tpg_hdiv(tpg, p, dev->compose_cap.width);
	unsigned img_height = dev->compose_cap.height;
	unsigned stride_cap = tpg->bytesperline[p];
	unsigned stride_out = dev->bytesperline_out[p];
	unsigned stride_osd = dev->display_byte_stride;
	unsigned hmax = (img_height * tpg->perc_fill) / 100;
	u8 *voutbuf;
	u8 *vosdbuf = NULL;
	unsigned y;
	bool blend = dev->bitmap_out || dev->clipcount_out || dev->fbuf_out_flags;
	/* Coarse scaling with Bresenham */
	unsigned vid_out_int_part;
	unsigned vid_out_fract_part;
	unsigned vid_out_y = 0;
	unsigned vid_out_error = 0;
	unsigned vid_overlay_int_part = 0;
	unsigned vid_overlay_fract_part = 0;
	unsigned vid_overlay_y = 0;
	unsigned vid_overlay_error = 0;
	unsigned vid_cap_left = tpg_hdiv(tpg, p, dev->loop_vid_cap.left);
	unsigned vid_cap_right;
	bool quick;

	vid_out_int_part = dev->loop_vid_out.height / dev->loop_vid_cap.height;
	vid_out_fract_part = dev->loop_vid_out.height % dev->loop_vid_cap.height;

	if (!list_empty(&dev->vid_out_active))
		vid_out_buf = list_entry(dev->vid_out_active.next,
					 struct vivid_buffer, list);
	if (vid_out_buf == NULL)
		return -ENODATA;

	vid_cap_buf->vb.field = vid_out_buf->vb.field;

	voutbuf = plane_vaddr(tpg, vid_out_buf, p,
			      dev->bytesperline_out, dev->fmt_out_rect.height);
	if (p < dev->fmt_out->buffers)
		voutbuf += vid_out_buf->vb.vb2_buf.planes[p].data_offset;
	voutbuf += tpg_hdiv(tpg, p, dev->loop_vid_out.left) +
		(dev->loop_vid_out.top / vdiv) * stride_out;
	vcapbuf += tpg_hdiv(tpg, p, dev->compose_cap.left) +
		(dev->compose_cap.top / vdiv) * stride_cap;

	if (dev->loop_vid_copy.width == 0 || dev->loop_vid_copy.height == 0) {
		/*
		 * If there is nothing to copy, then just fill the capture window
		 * with black.
		 */
		for (y = 0; y < hmax / vdiv; y++, vcapbuf += stride_cap)
			memcpy(vcapbuf, tpg->black_line[p], img_width);
		return 0;
	}

	if (dev->overlay_out_enabled &&
	    dev->loop_vid_overlay.width && dev->loop_vid_overlay.height) {
		vosdbuf = dev->video_vbase;
		vosdbuf += (dev->loop_fb_copy.left * twopixsize) / 2 +
			   dev->loop_fb_copy.top * stride_osd;
		vid_overlay_int_part = dev->loop_vid_overlay.height /
				       dev->loop_vid_overlay_cap.height;
		vid_overlay_fract_part = dev->loop_vid_overlay.height %
					 dev->loop_vid_overlay_cap.height;
	}

	vid_cap_right = tpg_hdiv(tpg, p, dev->loop_vid_cap.left + dev->loop_vid_cap.width);
	/* quick is true if no video scaling is needed */
	quick = dev->loop_vid_out.width == dev->loop_vid_cap.width;

	dev->cur_scaled_line = dev->loop_vid_out.height;
	for (y = 0; y < hmax; y += vdiv, vcapbuf += stride_cap) {
		/* osdline is true if this line requires overlay blending */
		bool osdline = vosdbuf && y >= dev->loop_vid_overlay_cap.top &&
			y < dev->loop_vid_overlay_cap.top + dev->loop_vid_overlay_cap.height;

		/*
		 * If this line of the capture buffer doesn't get any video, then
		 * just fill with black.
		 */
		if (y < dev->loop_vid_cap.top ||
		    y >= dev->loop_vid_cap.top + dev->loop_vid_cap.height) {
			memcpy(vcapbuf, tpg->black_line[p], img_width);
			continue;
		}

		/* fill the left border with black */
		if (dev->loop_vid_cap.left)
			memcpy(vcapbuf, tpg->black_line[p], vid_cap_left);

		/* fill the right border with black */
		if (vid_cap_right < img_width)
			memcpy(vcapbuf + vid_cap_right, tpg->black_line[p],
			       img_width - vid_cap_right);

		if (quick && !osdline) {
			memcpy(vcapbuf + vid_cap_left,
			       voutbuf + vid_out_y * stride_out,
			       tpg_hdiv(tpg, p, dev->loop_vid_cap.width));
			goto update_vid_out_y;
		}
		if (dev->cur_scaled_line == vid_out_y) {
			memcpy(vcapbuf + vid_cap_left, dev->scaled_line,
			       tpg_hdiv(tpg, p, dev->loop_vid_cap.width));
			goto update_vid_out_y;
		}
		if (!osdline) {
			scale_line(voutbuf + vid_out_y * stride_out, dev->scaled_line,
				tpg_hdiv(tpg, p, dev->loop_vid_out.width),
				tpg_hdiv(tpg, p, dev->loop_vid_cap.width),
				tpg_g_twopixelsize(tpg, p));
		} else {
			/*
			 * Offset in bytes within loop_vid_copy to the start of the
			 * loop_vid_overlay rectangle.
			 */
			unsigned offset =
				((dev->loop_vid_overlay.left - dev->loop_vid_copy.left) *
				 twopixsize) / 2;
			u8 *osd = vosdbuf + vid_overlay_y * stride_osd;

			scale_line(voutbuf + vid_out_y * stride_out, dev->blended_line,
				dev->loop_vid_out.width, dev->loop_vid_copy.width,
				tpg_g_twopixelsize(tpg, p));
			if (blend)
				blend_line(dev, vid_overlay_y + dev->loop_vid_overlay.top,
					   dev->loop_vid_overlay.left,
					   dev->blended_line + offset, osd,
					   dev->loop_vid_overlay.width, twopixsize / 2);
			else
				memcpy(dev->blended_line + offset,
				       osd, (dev->loop_vid_overlay.width * twopixsize) / 2);
			scale_line(dev->blended_line, dev->scaled_line,
				dev->loop_vid_copy.width, dev->loop_vid_cap.width,
				tpg_g_twopixelsize(tpg, p));
		}
		dev->cur_scaled_line = vid_out_y;
		memcpy(vcapbuf + vid_cap_left, dev->scaled_line,
		       tpg_hdiv(tpg, p, dev->loop_vid_cap.width));

update_vid_out_y:
		if (osdline) {
			vid_overlay_y += vid_overlay_int_part;
			vid_overlay_error += vid_overlay_fract_part;
			if (vid_overlay_error >= dev->loop_vid_overlay_cap.height) {
				vid_overlay_error -= dev->loop_vid_overlay_cap.height;
				vid_overlay_y++;
			}
		}
		vid_out_y += vid_out_int_part;
		vid_out_error += vid_out_fract_part;
		if (vid_out_error >= dev->loop_vid_cap.height / vdiv) {
			vid_out_error -= dev->loop_vid_cap.height / vdiv;
			vid_out_y++;
		}
	}

	if (!blank)
		return 0;
	for (; y < img_height; y += vdiv, vcapbuf += stride_cap)
		memcpy(vcapbuf, tpg->contrast_line[p], img_width);
	return 0;
}

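/*
 * Fill a capture buffer: either loop the video output (and optionally the
 * output overlay) into it, or generate a test pattern, and then draw the
 * OSD text lines (timestamp, size, controls) on top.
 */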
static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
{
	struct tpg_data *tpg = &dev->tpg;
	unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
	unsigned line_height = 16 / factor;
	bool is_tv = vivid_is_sdtv_cap(dev);
	bool is_60hz = is_tv && (dev->std_cap[dev->input] & V4L2_STD_525_60);
	unsigned p;
	int line = 1;
	u8 *basep[TPG_MAX_PLANES][2];
	unsigned ms;
	char str[100];
	s32 gain;
	bool is_loop = false;

	if (dev->loop_video && dev->can_loop_video &&
	    ((vivid_is_svid_cap(dev) &&
	      !VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) ||
	     (vivid_is_hdmi_cap(dev) &&
	      !VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode[dev->input]))))
		is_loop = true;

	buf->vb.sequence = dev->vid_cap_seq_count;
	v4l2_ctrl_s_ctrl(dev->ro_int32, buf->vb.sequence & 0xff);
	if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
		/*
		 * 60 Hz standards start with the bottom field, 50 Hz standards
		 * with the top field. So if the 0-based seq_count is even,
		 * then the field is TOP for 50 Hz and BOTTOM for 60 Hz
		 * standards.
		 */
		buf->vb.field = ((dev->vid_cap_seq_count & 1) ^ is_60hz) ?
			V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
		/*
		 * The sequence counter counts frames, not fields. So divide
		 * by two.
		 */
		buf->vb.sequence /= 2;
	} else {
		buf->vb.field = dev->field_cap;
	}
	tpg_s_field(tpg, buf->vb.field,
		    dev->field_cap == V4L2_FIELD_ALTERNATE);
	tpg_s_perc_fill_blank(tpg, dev->must_blank[buf->vb.vb2_buf.index]);

	vivid_precalc_copy_rects(dev);

	for (p = 0; p < tpg_g_planes(tpg); p++) {
		void *vbuf = plane_vaddr(tpg, buf, p,
					 tpg->bytesperline, tpg->buf_height);

		/*
		 * The first plane of a multiplanar format has a non-zero
		 * data_offset. This helps testing whether the application
		 * correctly supports non-zero data offsets.
		 */
		if (p < tpg_g_buffers(tpg) && dev->fmt_cap->data_offset[p]) {
			memset(vbuf, dev->fmt_cap->data_offset[p] & 0xff,
			       dev->fmt_cap->data_offset[p]);
			vbuf += dev->fmt_cap->data_offset[p];
		}
		tpg_calc_text_basep(tpg, basep, p, vbuf);
		if (!is_loop || vivid_copy_buffer(dev, p, vbuf, buf))
			tpg_fill_plane_buffer(tpg, vivid_get_std_cap(dev),
					      p, vbuf);
	}
	dev->must_blank[buf->vb.vb2_buf.index] = false;

	/* Update the stream time, but only at the start of a new frame. */
	if (dev->field_cap != V4L2_FIELD_ALTERNATE ||
	    (dev->vid_cap_seq_count & 1) == 0)
		dev->ms_vid_cap =
			jiffies_to_msecs(jiffies - dev->jiffies_vid_cap);

	ms = dev->ms_vid_cap;
	if (dev->osd_mode <= 1) {
		snprintf(str, sizeof(str), " %02d:%02d:%02d:%03d %u%s",
			 (ms / (60 * 60 * 1000)) % 24,
			 (ms / (60 * 1000)) % 60,
			 (ms / 1000) % 60,
			 ms % 1000,
			 buf->vb.sequence,
			 (dev->field_cap == V4L2_FIELD_ALTERNATE) ?
				 (buf->vb.field == V4L2_FIELD_TOP ?
				  " top" : " bottom") : "");
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
	}
	if (dev->osd_mode == 0) {
		snprintf(str, sizeof(str), " %dx%d, input %d ",
			 dev->src_rect.width, dev->src_rect.height, dev->input);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);

		gain = v4l2_ctrl_g_ctrl(dev->gain);
		mutex_lock(dev->ctrl_hdl_user_vid.lock);
		snprintf(str, sizeof(str),
			 " brightness %3d, contrast %3d, saturation %3d, hue %d ",
			 dev->brightness->cur.val,
			 dev->contrast->cur.val,
			 dev->saturation->cur.val,
			 dev->hue->cur.val);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		snprintf(str, sizeof(str),
			 " autogain %d, gain %3d, alpha 0x%02x ",
			 dev->autogain->cur.val, gain, dev->alpha->cur.val);
		mutex_unlock(dev->ctrl_hdl_user_vid.lock);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		mutex_lock(dev->ctrl_hdl_user_aud.lock);
		snprintf(str, sizeof(str),
			 " volume %3d, mute %d ",
			 dev->volume->cur.val, dev->mute->cur.val);
		mutex_unlock(dev->ctrl_hdl_user_aud.lock);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		mutex_lock(dev->ctrl_hdl_user_gen.lock);
		snprintf(str, sizeof(str), " int32 %d, ro_int32 %d, int64 %lld, bitmask %08x ",
			 dev->int32->cur.val,
			 dev->ro_int32->cur.val,
			 *dev->int64->p_cur.p_s64,
			 dev->bitmask->cur.val);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		snprintf(str, sizeof(str), " boolean %d, menu %s, string \"%s\" ",
			 dev->boolean->cur.val,
			 dev->menu->qmenu[dev->menu->cur.val],
			 dev->string->p_cur.p_char);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		snprintf(str, sizeof(str), " integer_menu %lld, value %d ",
			 dev->int_menu->qmenu_int[dev->int_menu->cur.val],
			 dev->int_menu->cur.val);
		mutex_unlock(dev->ctrl_hdl_user_gen.lock);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		if (dev->button_pressed) {
			dev->button_pressed--;
			snprintf(str, sizeof(str), " button pressed!");
			tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		}
		if (dev->osd[0]) {
			if (vivid_is_hdmi_cap(dev)) {
				snprintf(str, sizeof(str),
					 " OSD \"%s\"", dev->osd);
				tpg_gen_text(tpg, basep, line++ * line_height,
					     16, str);
			}
			if (dev->osd_jiffies &&
			    time_is_before_jiffies(dev->osd_jiffies + 5 * HZ)) {
				dev->osd[0] = 0;
				dev->osd_jiffies = 0;
			}
		}
	}
}

/*
 * Return true if this pixel coordinate is a valid video pixel.
 */
static bool valid_pix(struct vivid_dev *dev, int win_y, int win_x, int fb_y, int fb_x)
{
	int i;

	if (dev->bitmap_cap) {
		/*
		 * Only if the corresponding bit in the bitmap is set can
		 * the video pixel be shown. Coordinates are relative to
		 * the overlay window set by VIDIOC_S_FMT.
		 */
		const u8 *p = dev->bitmap_cap;
		unsigned stride = (dev->compose_cap.width + 7) / 8;

		if (!(p[stride * win_y + win_x / 8] & (1 << (win_x & 7))))
			return false;
	}

	for (i = 0; i < dev->clipcount_cap; i++) {
		/*
		 * Only if the framebuffer coordinate is not in any of the
		 * clip rectangles will the video pixel be shown.
		 */
		struct v4l2_rect *r = &dev->clips_cap[i].c;

		if (fb_y >= r->top && fb_y < r->top + r->height &&
		    fb_x >= r->left && fb_x < r->left + r->width)
			return false;
	}
	return true;
}

/*
 * Draw the image into the overlay buffer.
 * Note that the combination of overlay and multiplanar is not supported.
 */
static void vivid_overlay(struct vivid_dev *dev, struct vivid_buffer *buf)
{
	struct tpg_data *tpg = &dev->tpg;
	unsigned pixsize = tpg_g_twopixelsize(tpg, 0) / 2;
	void *vbase = dev->fb_vbase_cap;
	void *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
	unsigned img_width = dev->compose_cap.width;
	unsigned img_height = dev->compose_cap.height;
	unsigned stride = tpg->bytesperline[0];
	/* if quick is true, then valid_pix() doesn't have to be called */
	bool quick = dev->bitmap_cap == NULL && dev->clipcount_cap == 0;
	int x, y, w, out_x = 0;

	/*
	 * Overlay is only supported for formats that have a twopixelsize
	 * that's >= 2. Warn and bail out if that's not the case.
	 */
	if (WARN_ON(pixsize == 0))
		return;
	if ((dev->overlay_cap_field == V4L2_FIELD_TOP ||
	     dev->overlay_cap_field == V4L2_FIELD_BOTTOM) &&
	    dev->overlay_cap_field != buf->vb.field)
		return;

	vbuf += dev->compose_cap.left * pixsize + dev->compose_cap.top * stride;
	x = dev->overlay_cap_left;
	w = img_width;
	if (x < 0) {
		out_x = -x;
		w = w - out_x;
		x = 0;
	} else {
		w = dev->fb_cap.fmt.width - x;
		if (w > img_width)
			w = img_width;
	}
	if (w <= 0)
		return;
	if (dev->overlay_cap_top >= 0)
		vbase += dev->overlay_cap_top * dev->fb_cap.fmt.bytesperline;
	for (y = dev->overlay_cap_top;
	     y < dev->overlay_cap_top + (int)img_height;
	     y++, vbuf += stride) {
		int px;

		if (y < 0 || y > dev->fb_cap.fmt.height)
			continue;
		if (quick) {
			memcpy(vbase + x * pixsize,
			       vbuf + out_x * pixsize, w * pixsize);
			vbase += dev->fb_cap.fmt.bytesperline;
			continue;
		}
		for (px = 0; px < w; px++) {
			if (!valid_pix(dev, y - dev->overlay_cap_top,
				       px + out_x, y, px + x))
				continue;
			memcpy(vbase + (px + x) * pixsize,
			       vbuf + (px + out_x) * pixsize,
			       pixsize);
		}
		vbase += dev->fb_cap.fmt.bytesperline;
	}
}

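/*
 * Recalculate the capture frame period in nanoseconds from timeperframe,
 * halving it for V4L2_FIELD_ALTERNATE, and derive the end-of-frame
 * timestamp offset from it.
 */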
static void vivid_cap_update_frame_period(struct vivid_dev *dev)
{
	u64 f_period;

	f_period = (u64)dev->timeperframe_vid_cap.numerator * 1000000000;
	if (WARN_ON(dev->timeperframe_vid_cap.denominator == 0))
		dev->timeperframe_vid_cap.denominator = 1;
	do_div(f_period, dev->timeperframe_vid_cap.denominator);
	if (dev->field_cap == V4L2_FIELD_ALTERNATE)
		f_period >>= 1;
	/*
	 * If "End of Frame", then offset the exposure time by 0.9
	 * of the frame period.
	 */
	dev->cap_frame_eof_offset = f_period * 9;
	do_div(dev->cap_frame_eof_offset, 10);
	dev->cap_frame_period = f_period;
}

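/*
 * One tick of the capture thread: pick up any queued video, VBI and metadata
 * capture buffers, fill them, set their timestamps and mark them done, then
 * update the test pattern movement counters.
 */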
static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
							  int dropped_bufs)
{
	struct vivid_buffer *vid_cap_buf = NULL;
	struct vivid_buffer *vbi_cap_buf = NULL;
	struct vivid_buffer *meta_cap_buf = NULL;
	u64 f_time = 0;

	dprintk(dev, 1, "Video Capture Thread Tick\n");

	while (dropped_bufs-- > 1)
		tpg_update_mv_count(&dev->tpg,
				dev->field_cap == V4L2_FIELD_NONE ||
				dev->field_cap == V4L2_FIELD_ALTERNATE);

	/* Drop a certain percentage of buffers. */
	if (dev->perc_dropped_buffers &&
	    prandom_u32_max(100) < dev->perc_dropped_buffers)
		goto update_mv;

	spin_lock(&dev->slock);
	if (!list_empty(&dev->vid_cap_active)) {
		vid_cap_buf = list_entry(dev->vid_cap_active.next, struct vivid_buffer, list);
		list_del(&vid_cap_buf->list);
	}
	if (!list_empty(&dev->vbi_cap_active)) {
		if (dev->field_cap != V4L2_FIELD_ALTERNATE ||
		    (dev->vbi_cap_seq_count & 1)) {
			vbi_cap_buf = list_entry(dev->vbi_cap_active.next,
						 struct vivid_buffer, list);
			list_del(&vbi_cap_buf->list);
		}
	}
	if (!list_empty(&dev->meta_cap_active)) {
		meta_cap_buf = list_entry(dev->meta_cap_active.next,
					  struct vivid_buffer, list);
		list_del(&meta_cap_buf->list);
	}

	spin_unlock(&dev->slock);

	if (!vid_cap_buf && !vbi_cap_buf && !meta_cap_buf)
		goto update_mv;

	f_time = ktime_get_ns() + dev->time_wrap_offset;

	if (vid_cap_buf) {
		v4l2_ctrl_request_setup(vid_cap_buf->vb.vb2_buf.req_obj.req,
					&dev->ctrl_hdl_vid_cap);
		/* Fill buffer */
		vivid_fillbuff(dev, vid_cap_buf);
		dprintk(dev, 1, "filled buffer %d\n",
			vid_cap_buf->vb.vb2_buf.index);

		/* Handle overlay */
		if (dev->overlay_cap_owner && dev->fb_cap.base &&
		    dev->fb_cap.fmt.pixelformat == dev->fmt_cap->fourcc)
			vivid_overlay(dev, vid_cap_buf);

		v4l2_ctrl_request_complete(vid_cap_buf->vb.vb2_buf.req_obj.req,
					   &dev->ctrl_hdl_vid_cap);
		vb2_buffer_done(&vid_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
				VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
		dprintk(dev, 2, "vid_cap buffer %d done\n",
			vid_cap_buf->vb.vb2_buf.index);

		vid_cap_buf->vb.vb2_buf.timestamp = f_time;
		if (!dev->tstamp_src_is_soe)
			vid_cap_buf->vb.vb2_buf.timestamp += dev->cap_frame_eof_offset;
	}

	if (vbi_cap_buf) {
		u64 vbi_period;

		v4l2_ctrl_request_setup(vbi_cap_buf->vb.vb2_buf.req_obj.req,
					&dev->ctrl_hdl_vbi_cap);
		if (vbi_cap_buf->vb.vb2_buf.type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
			vivid_sliced_vbi_cap_process(dev, vbi_cap_buf);
		else
			vivid_raw_vbi_cap_process(dev, vbi_cap_buf);
		v4l2_ctrl_request_complete(vbi_cap_buf->vb.vb2_buf.req_obj.req,
					   &dev->ctrl_hdl_vbi_cap);
		vb2_buffer_done(&vbi_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
				VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
		dprintk(dev, 2, "vbi_cap %d done\n",
			vbi_cap_buf->vb.vb2_buf.index);

		/* If capturing VBI, offset the timestamp by 0.05 of the frame period. */
		vbi_period = dev->cap_frame_period * 5;
		do_div(vbi_period, 100);
		vbi_cap_buf->vb.vb2_buf.timestamp = f_time + dev->cap_frame_eof_offset + vbi_period;
	}

	if (meta_cap_buf) {
		v4l2_ctrl_request_setup(meta_cap_buf->vb.vb2_buf.req_obj.req,
					&dev->ctrl_hdl_meta_cap);
		vivid_meta_cap_fillbuff(dev, meta_cap_buf, f_time);
		v4l2_ctrl_request_complete(meta_cap_buf->vb.vb2_buf.req_obj.req,
					   &dev->ctrl_hdl_meta_cap);
		vb2_buffer_done(&meta_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
				VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
		dprintk(dev, 2, "meta_cap %d done\n",
			meta_cap_buf->vb.vb2_buf.index);
		meta_cap_buf->vb.vb2_buf.timestamp = f_time + dev->cap_frame_eof_offset;
	}

	dev->dqbuf_error = false;

update_mv:
	/* Update the test pattern movement counters */
	tpg_update_mv_count(&dev->tpg, dev->field_cap == V4L2_FIELD_NONE ||
				       dev->field_cap == V4L2_FIELD_ALTERNATE);
}

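/*
 * The video capture thread: initialize the frame counters and timestamps,
 * then generate buffers at the configured frame rate, sleeping between
 * ticks so the sequence count stays in sync with the elapsed jiffies.
 */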
static int vivid_thread_vid_cap(void *data)
{
	struct vivid_dev *dev = data;
	u64 numerators_since_start;
	u64 buffers_since_start;
	u64 next_jiffies_since_start;
	unsigned long jiffies_since_start;
	unsigned long cur_jiffies;
	unsigned wait_jiffies;
	unsigned numerator;
	unsigned denominator;
	int dropped_bufs;

	dprintk(dev, 1, "Video Capture Thread Start\n");

	set_freezable();

	/* Resets frame counters */
	dev->cap_seq_offset = 0;
	dev->cap_seq_count = 0;
	dev->cap_seq_resync = false;
	dev->jiffies_vid_cap = jiffies;
	dev->cap_stream_start = ktime_get_ns();
	if (dev->time_wrap)
		dev->time_wrap_offset = dev->time_wrap - dev->cap_stream_start;
	else
		dev->time_wrap_offset = 0;
	vivid_cap_update_frame_period(dev);

	for (;;) {
		try_to_freeze();
		if (kthread_should_stop())
			break;

		if (!mutex_trylock(&dev->mutex)) {
			schedule();
			continue;
		}

		cur_jiffies = jiffies;
		if (dev->cap_seq_resync) {
			dev->jiffies_vid_cap = cur_jiffies;
			dev->cap_seq_offset = dev->cap_seq_count + 1;
			dev->cap_seq_count = 0;
			dev->cap_stream_start += dev->cap_frame_period *
						 dev->cap_seq_offset;
			vivid_cap_update_frame_period(dev);
			dev->cap_seq_resync = false;
		}
		numerator = dev->timeperframe_vid_cap.numerator;
		denominator = dev->timeperframe_vid_cap.denominator;

		if (dev->field_cap == V4L2_FIELD_ALTERNATE)
			denominator *= 2;

		/* Calculate the number of jiffies since we started streaming */
		jiffies_since_start = cur_jiffies - dev->jiffies_vid_cap;
		/* Get the number of buffers streamed since the start */
		buffers_since_start = (u64)jiffies_since_start * denominator +
				      (HZ * numerator) / 2;
		do_div(buffers_since_start, HZ * numerator);

		/*
		 * After more than 0xf0000000 (rounded down to a multiple of
		 * 'jiffies-per-day' to ease jiffies_to_msecs calculation)
		 * jiffies have passed since we started streaming reset the
		 * counters and keep track of the sequence offset.
		 */
		if (jiffies_since_start > JIFFIES_RESYNC) {
			dev->jiffies_vid_cap = cur_jiffies;
			dev->cap_seq_offset = buffers_since_start;
			buffers_since_start = 0;
		}
		dropped_bufs = buffers_since_start + dev->cap_seq_offset - dev->cap_seq_count;
		dev->cap_seq_count = buffers_since_start + dev->cap_seq_offset;
		dev->vid_cap_seq_count = dev->cap_seq_count - dev->vid_cap_seq_start;
		dev->vbi_cap_seq_count = dev->cap_seq_count - dev->vbi_cap_seq_start;
		dev->meta_cap_seq_count = dev->cap_seq_count - dev->meta_cap_seq_start;

		vivid_thread_vid_cap_tick(dev, dropped_bufs);

		/*
		 * Calculate the number of 'numerators' streamed since we started,
		 * including the current buffer.
		 */
		numerators_since_start = ++buffers_since_start * numerator;

		/* And the number of jiffies since we started */
		jiffies_since_start = jiffies - dev->jiffies_vid_cap;

		mutex_unlock(&dev->mutex);

		/*
		 * Calculate when that next buffer is supposed to start
		 * in jiffies since we started streaming.
		 */
		next_jiffies_since_start = numerators_since_start * HZ +
					   denominator / 2;
		do_div(next_jiffies_since_start, denominator);
		/* If it is in the past, then just schedule asap */
		if (next_jiffies_since_start < jiffies_since_start)
			next_jiffies_since_start = jiffies_since_start;

		wait_jiffies = next_jiffies_since_start - jiffies_since_start;
		while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
		       !kthread_should_stop())
			schedule();
	}
	dprintk(dev, 1, "Video Capture Thread End\n");
	return 0;
}

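/*
 * Grab or release the controls that may not change while the capture
 * thread is running.
 */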
static void vivid_grab_controls(struct vivid_dev *dev, bool grab)
{
	v4l2_ctrl_grab(dev->ctrl_has_crop_cap, grab);
	v4l2_ctrl_grab(dev->ctrl_has_compose_cap, grab);
	v4l2_ctrl_grab(dev->ctrl_has_scaler_cap, grab);
}

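/*
 * Start generating video/VBI/metadata capture buffers. The first caller
 * starts the capture kthread; later callers only record their sequence
 * start so all capture streams share the same thread.
 */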
int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
{
	dprintk(dev, 1, "%s\n", __func__);

	if (dev->kthread_vid_cap) {
		u32 seq_count = dev->cap_seq_count + dev->seq_wrap * 128;

		if (pstreaming == &dev->vid_cap_streaming)
			dev->vid_cap_seq_start = seq_count;
		else if (pstreaming == &dev->vbi_cap_streaming)
			dev->vbi_cap_seq_start = seq_count;
		else
			dev->meta_cap_seq_start = seq_count;
		*pstreaming = true;
		return 0;
	}

	/* Resets frame counters */
	tpg_init_mv_count(&dev->tpg);

	dev->vid_cap_seq_start = dev->seq_wrap * 128;
	dev->vbi_cap_seq_start = dev->seq_wrap * 128;
	dev->meta_cap_seq_start = dev->seq_wrap * 128;

	dev->kthread_vid_cap = kthread_run(vivid_thread_vid_cap, dev,
					   "%s-vid-cap", dev->v4l2_dev.name);

	if (IS_ERR(dev->kthread_vid_cap)) {
		int err = PTR_ERR(dev->kthread_vid_cap);

		dev->kthread_vid_cap = NULL;
		v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
		return err;
	}
	*pstreaming = true;
	vivid_grab_controls(dev, true);

	dprintk(dev, 1, "returning from %s\n", __func__);
	return 0;
}

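/*
 * Stop generating buffers for one capture stream: return all of its active
 * buffers with VB2_BUF_STATE_ERROR and, once no capture stream is left
 * running, stop the capture kthread and release the grabbed controls.
 */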
void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
{
	dprintk(dev, 1, "%s\n", __func__);

	if (dev->kthread_vid_cap == NULL)
		return;

	*pstreaming = false;
	if (pstreaming == &dev->vid_cap_streaming) {
		/* Release all active buffers */
		while (!list_empty(&dev->vid_cap_active)) {
			struct vivid_buffer *buf;

			buf = list_entry(dev->vid_cap_active.next,
					 struct vivid_buffer, list);
			list_del(&buf->list);
			v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
						   &dev->ctrl_hdl_vid_cap);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
			dprintk(dev, 2, "vid_cap buffer %d done\n",
				buf->vb.vb2_buf.index);
		}
	}

	if (pstreaming == &dev->vbi_cap_streaming) {
		while (!list_empty(&dev->vbi_cap_active)) {
			struct vivid_buffer *buf;

			buf = list_entry(dev->vbi_cap_active.next,
					 struct vivid_buffer, list);
			list_del(&buf->list);
			v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
						   &dev->ctrl_hdl_vbi_cap);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
			dprintk(dev, 2, "vbi_cap buffer %d done\n",
				buf->vb.vb2_buf.index);
		}
	}

	if (pstreaming == &dev->meta_cap_streaming) {
		while (!list_empty(&dev->meta_cap_active)) {
			struct vivid_buffer *buf;

			buf = list_entry(dev->meta_cap_active.next,
					 struct vivid_buffer, list);
			list_del(&buf->list);
			v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
						   &dev->ctrl_hdl_meta_cap);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
			dprintk(dev, 2, "meta_cap buffer %d done\n",
				buf->vb.vb2_buf.index);
		}
	}

	if (dev->vid_cap_streaming || dev->vbi_cap_streaming ||
	    dev->meta_cap_streaming)
		return;

	/* shutdown control thread */
	vivid_grab_controls(dev, false);
	kthread_stop(dev->kthread_vid_cap);
	dev->kthread_vid_cap = NULL;
}