1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * vivid-vid-cap.c - video capture support functions.
4 *
5 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
6 */
7
8 #include <linux/errno.h>
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
11 #include <linux/vmalloc.h>
12 #include <linux/videodev2.h>
13 #include <linux/v4l2-dv-timings.h>
14 #include <media/v4l2-common.h>
15 #include <media/v4l2-event.h>
16 #include <media/v4l2-dv-timings.h>
17 #include <media/v4l2-rect.h>
18
19 #include "vivid-core.h"
20 #include "vivid-vid-common.h"
21 #include "vivid-kthread-cap.h"
22 #include "vivid-vid-cap.h"
23
24 static const struct vivid_fmt formats_ovl[] = {
25 {
26 .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
27 .vdownsampling = { 1 },
28 .bit_depth = { 16 },
29 .planes = 1,
30 .buffers = 1,
31 },
32 {
33 .fourcc = V4L2_PIX_FMT_XRGB555, /* gggbbbbb arrrrrgg */
34 .vdownsampling = { 1 },
35 .bit_depth = { 16 },
36 .planes = 1,
37 .buffers = 1,
38 },
39 {
40 .fourcc = V4L2_PIX_FMT_ARGB555, /* gggbbbbb arrrrrgg */
41 .vdownsampling = { 1 },
42 .bit_depth = { 16 },
43 .planes = 1,
44 .buffers = 1,
45 },
46 };
47
48 /* The number of discrete webcam framesizes */
49 #define VIVID_WEBCAM_SIZES 6
50 /* The number of discrete webcam frameintervals */
51 #define VIVID_WEBCAM_IVALS (VIVID_WEBCAM_SIZES * 2)
52
53 /* Sizes must be in increasing order */
54 static const struct v4l2_frmsize_discrete webcam_sizes[VIVID_WEBCAM_SIZES] = {
55 { 320, 180 },
56 { 640, 360 },
57 { 640, 480 },
58 { 1280, 720 },
59 { 1920, 1080 },
60 { 3840, 2160 },
61 };
62
63 /*
64 * Intervals must be ordered by increasing frame rate (longest frame period
65 * first), and there must be twice as many elements here as in webcam_sizes.
66 */
67 static const struct v4l2_fract webcam_intervals[VIVID_WEBCAM_IVALS] = {
68 { 1, 1 },
69 { 1, 2 },
70 { 1, 4 },
71 { 1, 5 },
72 { 1, 10 },
73 { 2, 25 },
74 { 1, 15 },
75 { 1, 25 },
76 { 1, 30 },
77 { 1, 40 },
78 { 1, 50 },
79 { 1, 60 },
80 };
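/*
 * Note (derived from the index checks in vidioc_enum_frameintervals() and
 * vivid_vid_cap_s_parm() below): a webcam size at index i exposes only the
 * first 2 * (VIVID_WEBCAM_SIZES - i) entries of this table. For example,
 * 320x180 (i == 0) allows all twelve intervals up to 1/60 s, while
 * 3840x2160 (i == 5) only allows { 1, 1 } and { 1, 2 }.
 */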
81
82 static int vid_cap_queue_setup(struct vb2_queue *vq,
83 unsigned *nbuffers, unsigned *nplanes,
84 unsigned sizes[], struct device *alloc_devs[])
85 {
86 struct vivid_dev *dev = vb2_get_drv_priv(vq);
87 unsigned buffers = tpg_g_buffers(&dev->tpg);
88 unsigned h = dev->fmt_cap_rect.height;
89 unsigned p;
90
91 if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
92 /*
93 * You cannot use read() with FIELD_ALTERNATE since the field
94 * information (TOP/BOTTOM) cannot be passed back to the user.
95 */
96 if (vb2_fileio_is_active(vq))
97 return -EINVAL;
98 }
99
100 if (dev->queue_setup_error) {
101 /*
102 * Error injection: test what happens if queue_setup() returns
103 * an error.
104 */
105 dev->queue_setup_error = false;
106 return -EINVAL;
107 }
108 if (*nplanes) {
109 /*
110 * Check if the number of requested planes matches the number of
111 * buffers in the current format; the two must be the same.
112 */
113 if (*nplanes != buffers)
114 return -EINVAL;
115 for (p = 0; p < buffers; p++) {
116 if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h +
117 dev->fmt_cap->data_offset[p])
118 return -EINVAL;
119 }
120 } else {
121 for (p = 0; p < buffers; p++)
122 sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) /
123 dev->fmt_cap->vdownsampling[p] +
124 dev->fmt_cap->data_offset[p];
125 }
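/*
 * Worked example of the size computation above, assuming a 1280x720
 * single-planar YUYV format with the default bytesperline of width * 2:
 * tpg_g_line_width() returns 2560, vdownsampling is 1 and data_offset is 0,
 * so the plane size works out to 2560 * 720 = 1843200 bytes.
 */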
126
127 if (vq->num_buffers + *nbuffers < 2)
128 *nbuffers = 2 - vq->num_buffers;
129
130 *nplanes = buffers;
131
132 dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
133 for (p = 0; p < buffers; p++)
134 dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);
135
136 return 0;
137 }
138
139 static int vid_cap_buf_prepare(struct vb2_buffer *vb)
140 {
141 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
142 unsigned long size;
143 unsigned buffers = tpg_g_buffers(&dev->tpg);
144 unsigned p;
145
146 dprintk(dev, 1, "%s\n", __func__);
147
148 if (WARN_ON(NULL == dev->fmt_cap))
149 return -EINVAL;
150
151 if (dev->buf_prepare_error) {
152 /*
153 * Error injection: test what happens if buf_prepare() returns
154 * an error.
155 */
156 dev->buf_prepare_error = false;
157 return -EINVAL;
158 }
159 for (p = 0; p < buffers; p++) {
160 size = (tpg_g_line_width(&dev->tpg, p) *
161 dev->fmt_cap_rect.height) /
162 dev->fmt_cap->vdownsampling[p] +
163 dev->fmt_cap->data_offset[p];
164
165 if (vb2_plane_size(vb, p) < size) {
166 dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n",
167 __func__, p, vb2_plane_size(vb, p), size);
168 return -EINVAL;
169 }
170
171 vb2_set_plane_payload(vb, p, size);
172 vb->planes[p].data_offset = dev->fmt_cap->data_offset[p];
173 }
174
175 return 0;
176 }
177
178 static void vid_cap_buf_finish(struct vb2_buffer *vb)
179 {
180 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
181 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
182 struct v4l2_timecode *tc = &vbuf->timecode;
183 unsigned fps = 25;
184 unsigned seq = vbuf->sequence;
185
186 if (!vivid_is_sdtv_cap(dev))
187 return;
188
189 /*
190 * Set the timecode. Rarely used, so it is interesting to
191 * test this.
192 */
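/*
 * As an example of the arithmetic below: with a 60 Hz standard (fps == 30)
 * sequence number 54321 becomes frames = 21, seconds = 10, minutes = 30,
 * hours = 0, i.e. timecode 00:30:10:21.
 */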
193 vbuf->flags |= V4L2_BUF_FLAG_TIMECODE;
194 if (dev->std_cap[dev->input] & V4L2_STD_525_60)
195 fps = 30;
196 tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
197 tc->flags = 0;
198 tc->frames = seq % fps;
199 tc->seconds = (seq / fps) % 60;
200 tc->minutes = (seq / (60 * fps)) % 60;
201 tc->hours = (seq / (60 * 60 * fps)) % 24;
202 }
203
204 static void vid_cap_buf_queue(struct vb2_buffer *vb)
205 {
206 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
207 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
208 struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
209
210 dprintk(dev, 1, "%s\n", __func__);
211
212 spin_lock(&dev->slock);
213 list_add_tail(&buf->list, &dev->vid_cap_active);
214 spin_unlock(&dev->slock);
215 }
216
217 static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
218 {
219 struct vivid_dev *dev = vb2_get_drv_priv(vq);
220 unsigned i;
221 int err;
222
223 if (vb2_is_streaming(&dev->vb_vid_out_q))
224 dev->can_loop_video = vivid_vid_can_loop(dev);
225
226 dev->vid_cap_seq_count = 0;
227 dprintk(dev, 1, "%s\n", __func__);
228 for (i = 0; i < VIDEO_MAX_FRAME; i++)
229 dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100;
230 if (dev->start_streaming_error) {
231 dev->start_streaming_error = false;
232 err = -EINVAL;
233 } else {
234 err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming);
235 }
236 if (err) {
237 struct vivid_buffer *buf, *tmp;
238
239 list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
240 list_del(&buf->list);
241 vb2_buffer_done(&buf->vb.vb2_buf,
242 VB2_BUF_STATE_QUEUED);
243 }
244 }
245 return err;
246 }
247
248 /* abort streaming and wait for last buffer */
249 static void vid_cap_stop_streaming(struct vb2_queue *vq)
250 {
251 struct vivid_dev *dev = vb2_get_drv_priv(vq);
252
253 dprintk(dev, 1, "%s\n", __func__);
254 vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming);
255 dev->can_loop_video = false;
256 }
257
258 static void vid_cap_buf_request_complete(struct vb2_buffer *vb)
259 {
260 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
261
262 v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_cap);
263 }
264
265 const struct vb2_ops vivid_vid_cap_qops = {
266 .queue_setup = vid_cap_queue_setup,
267 .buf_prepare = vid_cap_buf_prepare,
268 .buf_finish = vid_cap_buf_finish,
269 .buf_queue = vid_cap_buf_queue,
270 .start_streaming = vid_cap_start_streaming,
271 .stop_streaming = vid_cap_stop_streaming,
272 .buf_request_complete = vid_cap_buf_request_complete,
273 .wait_prepare = vb2_ops_wait_prepare,
274 .wait_finish = vb2_ops_wait_finish,
275 };
276
277 /*
278 * Determine the 'picture' quality based on the current TV frequency: either
279 * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off
280 * signal or NOISE for no signal.
281 */
282 void vivid_update_quality(struct vivid_dev *dev)
283 {
284 unsigned freq_modulus;
285
286 if (dev->loop_video && (vivid_is_svid_cap(dev) || vivid_is_hdmi_cap(dev))) {
287 /*
288 * The 'noise' will only be replaced by the actual video
289 * if the output video matches the input video settings.
290 */
291 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
292 return;
293 }
294 if (vivid_is_hdmi_cap(dev) &&
295 VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode[dev->input])) {
296 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
297 return;
298 }
299 if (vivid_is_sdtv_cap(dev) &&
300 VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) {
301 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
302 return;
303 }
304 if (!vivid_is_tv_cap(dev)) {
305 tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
306 return;
307 }
308
309 /*
310 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
311 * From +/- 0.25 MHz around the channel there is color, and from
312 * +/- 1 MHz there is grayscale (chroma is lost).
313 * Everywhere else it is just noise.
314 */
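/*
 * Worked example (tv_freq is in units of 1/16 MHz, i.e. 62.5 kHz): tuning to
 * 55.25 MHz gives tv_freq = 884 and freq_modulus = (884 - 676) % 96 = 16,
 * which falls in the 12..20 window and so produces color. 56 MHz yields
 * freq_modulus = 28 (grayscale) and 56.5 MHz yields 36 (noise).
 */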
315 freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
316 if (freq_modulus > 2 * 16) {
317 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE,
318 next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f);
319 return;
320 }
321 if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/)
322 tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0);
323 else
324 tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
325 }
326
327 /*
328 * Get the current picture quality and the associated afc value.
329 */
330 static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc)
331 {
332 unsigned freq_modulus;
333
334 if (afc)
335 *afc = 0;
336 if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR ||
337 tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE)
338 return tpg_g_quality(&dev->tpg);
339
340 /*
341 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
342 * From +/- 0.25 MHz around the channel there is color, and from
343 * +/- 1 MHz there is grayscale (chroma is lost).
344 * Everywhere else it is just gray.
345 */
346 freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
347 if (afc)
348 *afc = freq_modulus - 1 * 16;
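/*
 * The resulting afc is in 1/16 MHz steps: zero when tuned exactly to a
 * fake channel, negative below it and positive above it.
 */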
349 return TPG_QUAL_GRAY;
350 }
351
352 enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev)
353 {
354 if (vivid_is_sdtv_cap(dev))
355 return dev->std_aspect_ratio[dev->input];
356
357 if (vivid_is_hdmi_cap(dev))
358 return dev->dv_timings_aspect_ratio[dev->input];
359
360 return TPG_VIDEO_ASPECT_IMAGE;
361 }
362
363 static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
364 {
365 if (vivid_is_sdtv_cap(dev))
366 return (dev->std_cap[dev->input] & V4L2_STD_525_60) ?
367 TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
368
369 if (vivid_is_hdmi_cap(dev) &&
370 dev->src_rect.width == 720 && dev->src_rect.height <= 576)
371 return dev->src_rect.height == 480 ?
372 TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
373
374 return TPG_PIXEL_ASPECT_SQUARE;
375 }
376
377 /*
378 * Called whenever the format has to be reset, which can happen when
379 * changing inputs, standards, timings, etc.
380 */
381 void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
382 {
383 struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
384 unsigned size;
385 u64 pixelclock;
386
387 switch (dev->input_type[dev->input]) {
388 case WEBCAM:
389 default:
390 dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width;
391 dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height;
392 dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx];
393 dev->field_cap = V4L2_FIELD_NONE;
394 tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
395 break;
396 case TV:
397 case SVID:
398 dev->field_cap = dev->tv_field_cap;
399 dev->src_rect.width = 720;
400 if (dev->std_cap[dev->input] & V4L2_STD_525_60) {
401 dev->src_rect.height = 480;
402 dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 };
403 dev->service_set_cap = V4L2_SLICED_CAPTION_525;
404 } else {
405 dev->src_rect.height = 576;
406 dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 };
407 dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
408 }
409 tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
410 break;
411 case HDMI:
412 dev->src_rect.width = bt->width;
413 dev->src_rect.height = bt->height;
414 size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt);
415 if (dev->reduced_fps && can_reduce_fps(bt)) {
416 pixelclock = div_u64(bt->pixelclock * 1000, 1001);
417 bt->flags |= V4L2_DV_FL_REDUCED_FPS;
418 } else {
419 pixelclock = bt->pixelclock;
420 bt->flags &= ~V4L2_DV_FL_REDUCED_FPS;
421 }
422 dev->timeperframe_vid_cap = (struct v4l2_fract) {
423 size / 100, (u32)pixelclock / 100
424 };
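/*
 * Sanity check of this fraction with a common timing (illustrative only):
 * 1920x1080p60 has a 2200 * 1125 = 2475000 pixel total frame and a
 * 148500000 Hz pixelclock, so timeperframe is 24750 / 1485000 = 1/60 s.
 */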
425 if (bt->interlaced)
426 dev->field_cap = V4L2_FIELD_ALTERNATE;
427 else
428 dev->field_cap = V4L2_FIELD_NONE;
429
430 /*
431 * We can be called from within s_ctrl; in that case we can't
432 * set or get controls. Luckily we don't need to in that case.
433 */
434 if (keep_controls || !dev->colorspace)
435 break;
436 if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
437 if (bt->width == 720 && bt->height <= 576)
438 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
439 else
440 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
441 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1);
442 } else {
443 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
444 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0);
445 }
446 tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
447 break;
448 }
449 vfree(dev->bitmap_cap);
450 dev->bitmap_cap = NULL;
451 vivid_update_quality(dev);
452 tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
453 dev->crop_cap = dev->src_rect;
454 dev->crop_bounds_cap = dev->src_rect;
455 dev->compose_cap = dev->crop_cap;
456 if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
457 dev->compose_cap.height /= 2;
458 dev->fmt_cap_rect = dev->compose_cap;
459 tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
460 tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev));
461 tpg_update_mv_step(&dev->tpg);
462 }
463
464 /* Map the field to something that is valid for the current input */
465 static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field)
466 {
467 if (vivid_is_sdtv_cap(dev)) {
468 switch (field) {
469 case V4L2_FIELD_INTERLACED_TB:
470 case V4L2_FIELD_INTERLACED_BT:
471 case V4L2_FIELD_SEQ_TB:
472 case V4L2_FIELD_SEQ_BT:
473 case V4L2_FIELD_TOP:
474 case V4L2_FIELD_BOTTOM:
475 case V4L2_FIELD_ALTERNATE:
476 return field;
477 case V4L2_FIELD_INTERLACED:
478 default:
479 return V4L2_FIELD_INTERLACED;
480 }
481 }
482 if (vivid_is_hdmi_cap(dev))
483 return dev->dv_timings_cap[dev->input].bt.interlaced ?
484 V4L2_FIELD_ALTERNATE : V4L2_FIELD_NONE;
485 return V4L2_FIELD_NONE;
486 }
487
488 static unsigned vivid_colorspace_cap(struct vivid_dev *dev)
489 {
490 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
491 return tpg_g_colorspace(&dev->tpg);
492 return dev->colorspace_out;
493 }
494
495 static unsigned vivid_xfer_func_cap(struct vivid_dev *dev)
496 {
497 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
498 return tpg_g_xfer_func(&dev->tpg);
499 return dev->xfer_func_out;
500 }
501
502 static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
503 {
504 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
505 return tpg_g_ycbcr_enc(&dev->tpg);
506 return dev->ycbcr_enc_out;
507 }
508
509 static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev)
510 {
511 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
512 return tpg_g_hsv_enc(&dev->tpg);
513 return dev->hsv_enc_out;
514 }
515
516 static unsigned vivid_quantization_cap(struct vivid_dev *dev)
517 {
518 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
519 return tpg_g_quantization(&dev->tpg);
520 return dev->quantization_out;
521 }
522
523 int vivid_g_fmt_vid_cap(struct file *file, void *priv,
524 struct v4l2_format *f)
525 {
526 struct vivid_dev *dev = video_drvdata(file);
527 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
528 unsigned p;
529
530 mp->width = dev->fmt_cap_rect.width;
531 mp->height = dev->fmt_cap_rect.height;
532 mp->field = dev->field_cap;
533 mp->pixelformat = dev->fmt_cap->fourcc;
534 mp->colorspace = vivid_colorspace_cap(dev);
535 mp->xfer_func = vivid_xfer_func_cap(dev);
536 if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV)
537 mp->hsv_enc = vivid_hsv_enc_cap(dev);
538 else
539 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
540 mp->quantization = vivid_quantization_cap(dev);
541 mp->num_planes = dev->fmt_cap->buffers;
542 for (p = 0; p < mp->num_planes; p++) {
543 mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
544 mp->plane_fmt[p].sizeimage =
545 (tpg_g_line_width(&dev->tpg, p) * mp->height) /
546 dev->fmt_cap->vdownsampling[p] +
547 dev->fmt_cap->data_offset[p];
548 }
549 return 0;
550 }
551
552 int vivid_try_fmt_vid_cap(struct file *file, void *priv,
553 struct v4l2_format *f)
554 {
555 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
556 struct v4l2_plane_pix_format *pfmt = mp->plane_fmt;
557 struct vivid_dev *dev = video_drvdata(file);
558 const struct vivid_fmt *fmt;
559 unsigned bytesperline, max_bpl;
560 unsigned factor = 1;
561 unsigned w, h;
562 unsigned p;
563 bool user_set_csc = !!(mp->flags & V4L2_PIX_FMT_FLAG_SET_CSC);
564
565 fmt = vivid_get_format(dev, mp->pixelformat);
566 if (!fmt) {
567 dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
568 mp->pixelformat);
569 mp->pixelformat = V4L2_PIX_FMT_YUYV;
570 fmt = vivid_get_format(dev, mp->pixelformat);
571 }
572
573 mp->field = vivid_field_cap(dev, mp->field);
574 if (vivid_is_webcam(dev)) {
575 const struct v4l2_frmsize_discrete *sz =
576 v4l2_find_nearest_size(webcam_sizes,
577 VIVID_WEBCAM_SIZES, width,
578 height, mp->width, mp->height);
579
580 w = sz->width;
581 h = sz->height;
582 } else if (vivid_is_sdtv_cap(dev)) {
583 w = 720;
584 h = (dev->std_cap[dev->input] & V4L2_STD_525_60) ? 480 : 576;
585 } else {
586 w = dev->src_rect.width;
587 h = dev->src_rect.height;
588 }
589 if (V4L2_FIELD_HAS_T_OR_B(mp->field))
590 factor = 2;
591 if (vivid_is_webcam(dev) ||
592 (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) {
593 mp->width = w;
594 mp->height = h / factor;
595 } else {
596 struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };
597
598 v4l2_rect_set_min_size(&r, &vivid_min_rect);
599 v4l2_rect_set_max_size(&r, &vivid_max_rect);
600 if (dev->has_scaler_cap && !dev->has_compose_cap) {
601 struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };
602
603 v4l2_rect_set_max_size(&r, &max_r);
604 } else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) {
605 v4l2_rect_set_max_size(&r, &dev->src_rect);
606 } else if (!dev->has_scaler_cap && !dev->has_crop_cap) {
607 v4l2_rect_set_min_size(&r, &dev->src_rect);
608 }
609 mp->width = r.width;
610 mp->height = r.height / factor;
611 }
612
613 /* This driver supports custom bytesperline values */
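/*
 * For example, assuming a 16 bits-per-pixel format such as YUYV at a width
 * of 1280, the minimum bytesperline is 1280 * 16 / 8 = 2560; any larger
 * value up to max_bpl is treated as line padding.
 */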
614
615 mp->num_planes = fmt->buffers;
616 for (p = 0; p < fmt->buffers; p++) {
617 /* Calculate the minimum supported bytesperline value */
618 bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
619 /* Calculate the maximum supported bytesperline value */
620 max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;
621
622 if (pfmt[p].bytesperline > max_bpl)
623 pfmt[p].bytesperline = max_bpl;
624 if (pfmt[p].bytesperline < bytesperline)
625 pfmt[p].bytesperline = bytesperline;
626
627 pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
628 fmt->vdownsampling[p] + fmt->data_offset[p];
629
630 memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
631 }
632 for (p = fmt->buffers; p < fmt->planes; p++)
633 pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height *
634 (fmt->bit_depth[p] / fmt->vdownsampling[p])) /
635 (fmt->bit_depth[0] / fmt->vdownsampling[0]);
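/*
 * The loop above handles formats with more color planes than memory buffers
 * (e.g. an NV12-style layout with buffers == 1 and planes == 2): the extra
 * planes' bytes are folded into plane 0's sizeimage, scaled by their
 * relative bit depth and vertical downsampling.
 */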
636
637 if (!user_set_csc || !v4l2_is_colorspace_valid(mp->colorspace))
638 mp->colorspace = vivid_colorspace_cap(dev);
639
640 if (!user_set_csc || !v4l2_is_xfer_func_valid(mp->xfer_func))
641 mp->xfer_func = vivid_xfer_func_cap(dev);
642
643 if (fmt->color_enc == TGP_COLOR_ENC_HSV) {
644 if (!user_set_csc || !v4l2_is_hsv_enc_valid(mp->hsv_enc))
645 mp->hsv_enc = vivid_hsv_enc_cap(dev);
646 } else if (fmt->color_enc == TGP_COLOR_ENC_YCBCR) {
647 if (!user_set_csc || !v4l2_is_ycbcr_enc_valid(mp->ycbcr_enc))
648 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
649 } else {
650 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
651 }
652
653 if (fmt->color_enc == TGP_COLOR_ENC_YCBCR ||
654 fmt->color_enc == TGP_COLOR_ENC_RGB) {
655 if (!user_set_csc || !v4l2_is_quant_valid(mp->quantization))
656 mp->quantization = vivid_quantization_cap(dev);
657 } else {
658 mp->quantization = vivid_quantization_cap(dev);
659 }
660
661 memset(mp->reserved, 0, sizeof(mp->reserved));
662 return 0;
663 }
664
665 int vivid_s_fmt_vid_cap(struct file *file, void *priv,
666 struct v4l2_format *f)
667 {
668 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
669 struct vivid_dev *dev = video_drvdata(file);
670 struct v4l2_rect *crop = &dev->crop_cap;
671 struct v4l2_rect *compose = &dev->compose_cap;
672 struct vb2_queue *q = &dev->vb_vid_cap_q;
673 int ret = vivid_try_fmt_vid_cap(file, priv, f);
674 unsigned factor = 1;
675 unsigned p;
676 unsigned i;
677
678 if (ret < 0)
679 return ret;
680
681 if (vb2_is_busy(q)) {
682 dprintk(dev, 1, "%s device busy\n", __func__);
683 return -EBUSY;
684 }
685
686 if (dev->overlay_cap_owner && dev->fb_cap.fmt.pixelformat != mp->pixelformat) {
687 dprintk(dev, 1, "overlay is active, can't change pixelformat\n");
688 return -EBUSY;
689 }
690
691 dev->fmt_cap = vivid_get_format(dev, mp->pixelformat);
692 if (V4L2_FIELD_HAS_T_OR_B(mp->field))
693 factor = 2;
694
695 /* Note: the webcam input doesn't support scaling, cropping or composing */
696
697 if (!vivid_is_webcam(dev) &&
698 (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) {
699 struct v4l2_rect r = { 0, 0, mp->width, mp->height };
700
701 if (dev->has_scaler_cap) {
702 if (dev->has_compose_cap)
703 v4l2_rect_map_inside(compose, &r);
704 else
705 *compose = r;
706 if (dev->has_crop_cap && !dev->has_compose_cap) {
707 struct v4l2_rect min_r = {
708 0, 0,
709 r.width / MAX_ZOOM,
710 factor * r.height / MAX_ZOOM
711 };
712 struct v4l2_rect max_r = {
713 0, 0,
714 r.width * MAX_ZOOM,
715 factor * r.height * MAX_ZOOM
716 };
717
718 v4l2_rect_set_min_size(crop, &min_r);
719 v4l2_rect_set_max_size(crop, &max_r);
720 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
721 } else if (dev->has_crop_cap) {
722 struct v4l2_rect min_r = {
723 0, 0,
724 compose->width / MAX_ZOOM,
725 factor * compose->height / MAX_ZOOM
726 };
727 struct v4l2_rect max_r = {
728 0, 0,
729 compose->width * MAX_ZOOM,
730 factor * compose->height * MAX_ZOOM
731 };
732
733 v4l2_rect_set_min_size(crop, &min_r);
734 v4l2_rect_set_max_size(crop, &max_r);
735 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
736 }
737 } else if (dev->has_crop_cap && !dev->has_compose_cap) {
738 r.height *= factor;
739 v4l2_rect_set_size_to(crop, &r);
740 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
741 r = *crop;
742 r.height /= factor;
743 v4l2_rect_set_size_to(compose, &r);
744 } else if (!dev->has_crop_cap) {
745 v4l2_rect_map_inside(compose, &r);
746 } else {
747 r.height *= factor;
748 v4l2_rect_set_max_size(crop, &r);
749 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
750 compose->top *= factor;
751 compose->height *= factor;
752 v4l2_rect_set_size_to(compose, crop);
753 v4l2_rect_map_inside(compose, &r);
754 compose->top /= factor;
755 compose->height /= factor;
756 }
757 } else if (vivid_is_webcam(dev)) {
758 /* Guaranteed to be a match */
759 for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
760 if (webcam_sizes[i].width == mp->width &&
761 webcam_sizes[i].height == mp->height)
762 break;
763 dev->webcam_size_idx = i;
764 if (dev->webcam_ival_idx >= 2 * (VIVID_WEBCAM_SIZES - i))
765 dev->webcam_ival_idx = 2 * (VIVID_WEBCAM_SIZES - i) - 1;
766 vivid_update_format_cap(dev, false);
767 } else {
768 struct v4l2_rect r = { 0, 0, mp->width, mp->height };
769
770 v4l2_rect_set_size_to(compose, &r);
771 r.height *= factor;
772 v4l2_rect_set_size_to(crop, &r);
773 }
774
775 dev->fmt_cap_rect.width = mp->width;
776 dev->fmt_cap_rect.height = mp->height;
777 tpg_s_buf_height(&dev->tpg, mp->height);
778 tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
779 for (p = 0; p < tpg_g_buffers(&dev->tpg); p++)
780 tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline);
781 dev->field_cap = mp->field;
782 if (dev->field_cap == V4L2_FIELD_ALTERNATE)
783 tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true);
784 else
785 tpg_s_field(&dev->tpg, dev->field_cap, false);
786 tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap);
787 if (vivid_is_sdtv_cap(dev))
788 dev->tv_field_cap = mp->field;
789 tpg_update_mv_step(&dev->tpg);
790 dev->tpg.colorspace = mp->colorspace;
791 dev->tpg.xfer_func = mp->xfer_func;
792 if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_YCBCR)
793 dev->tpg.ycbcr_enc = mp->ycbcr_enc;
794 else
795 dev->tpg.hsv_enc = mp->hsv_enc;
796 dev->tpg.quantization = mp->quantization;
797
798 return 0;
799 }
800
801 int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv,
802 struct v4l2_format *f)
803 {
804 struct vivid_dev *dev = video_drvdata(file);
805
806 if (!dev->multiplanar)
807 return -ENOTTY;
808 return vivid_g_fmt_vid_cap(file, priv, f);
809 }
810
811 int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
812 struct v4l2_format *f)
813 {
814 struct vivid_dev *dev = video_drvdata(file);
815
816 if (!dev->multiplanar)
817 return -ENOTTY;
818 return vivid_try_fmt_vid_cap(file, priv, f);
819 }
820
821 int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv,
822 struct v4l2_format *f)
823 {
824 struct vivid_dev *dev = video_drvdata(file);
825
826 if (!dev->multiplanar)
827 return -ENOTTY;
828 return vivid_s_fmt_vid_cap(file, priv, f);
829 }
830
831 int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
832 struct v4l2_format *f)
833 {
834 struct vivid_dev *dev = video_drvdata(file);
835
836 if (dev->multiplanar)
837 return -ENOTTY;
838 return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap);
839 }
840
841 int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
842 struct v4l2_format *f)
843 {
844 struct vivid_dev *dev = video_drvdata(file);
845
846 if (dev->multiplanar)
847 return -ENOTTY;
848 return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap);
849 }
850
851 int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
852 struct v4l2_format *f)
853 {
854 struct vivid_dev *dev = video_drvdata(file);
855
856 if (dev->multiplanar)
857 return -ENOTTY;
858 return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap);
859 }
860
861 int vivid_vid_cap_g_selection(struct file *file, void *priv,
862 struct v4l2_selection *sel)
863 {
864 struct vivid_dev *dev = video_drvdata(file);
865
866 if (!dev->has_crop_cap && !dev->has_compose_cap)
867 return -ENOTTY;
868 if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
869 return -EINVAL;
870 if (vivid_is_webcam(dev))
871 return -ENODATA;
872
873 sel->r.left = sel->r.top = 0;
874 switch (sel->target) {
875 case V4L2_SEL_TGT_CROP:
876 if (!dev->has_crop_cap)
877 return -EINVAL;
878 sel->r = dev->crop_cap;
879 break;
880 case V4L2_SEL_TGT_CROP_DEFAULT:
881 case V4L2_SEL_TGT_CROP_BOUNDS:
882 if (!dev->has_crop_cap)
883 return -EINVAL;
884 sel->r = dev->src_rect;
885 break;
886 case V4L2_SEL_TGT_COMPOSE_BOUNDS:
887 if (!dev->has_compose_cap)
888 return -EINVAL;
889 sel->r = vivid_max_rect;
890 break;
891 case V4L2_SEL_TGT_COMPOSE:
892 if (!dev->has_compose_cap)
893 return -EINVAL;
894 sel->r = dev->compose_cap;
895 break;
896 case V4L2_SEL_TGT_COMPOSE_DEFAULT:
897 if (!dev->has_compose_cap)
898 return -EINVAL;
899 sel->r = dev->fmt_cap_rect;
900 break;
901 default:
902 return -EINVAL;
903 }
904 return 0;
905 }
906
907 int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
908 {
909 struct vivid_dev *dev = video_drvdata(file);
910 struct v4l2_rect *crop = &dev->crop_cap;
911 struct v4l2_rect *compose = &dev->compose_cap;
912 unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
913 int ret;
914
915 if (!dev->has_crop_cap && !dev->has_compose_cap)
916 return -ENOTTY;
917 if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
918 return -EINVAL;
919 if (vivid_is_webcam(dev))
920 return -ENODATA;
921
922 switch (s->target) {
923 case V4L2_SEL_TGT_CROP:
924 if (!dev->has_crop_cap)
925 return -EINVAL;
926 ret = vivid_vid_adjust_sel(s->flags, &s->r);
927 if (ret)
928 return ret;
929 v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
930 v4l2_rect_set_max_size(&s->r, &dev->src_rect);
931 v4l2_rect_map_inside(&s->r, &dev->crop_bounds_cap);
932 s->r.top /= factor;
933 s->r.height /= factor;
934 if (dev->has_scaler_cap) {
935 struct v4l2_rect fmt = dev->fmt_cap_rect;
936 struct v4l2_rect max_rect = {
937 0, 0,
938 s->r.width * MAX_ZOOM,
939 s->r.height * MAX_ZOOM
940 };
941 struct v4l2_rect min_rect = {
942 0, 0,
943 s->r.width / MAX_ZOOM,
944 s->r.height / MAX_ZOOM
945 };
946
947 v4l2_rect_set_min_size(&fmt, &min_rect);
948 if (!dev->has_compose_cap)
949 v4l2_rect_set_max_size(&fmt, &max_rect);
950 if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
951 vb2_is_busy(&dev->vb_vid_cap_q))
952 return -EBUSY;
953 if (dev->has_compose_cap) {
954 v4l2_rect_set_min_size(compose, &min_rect);
955 v4l2_rect_set_max_size(compose, &max_rect);
956 }
957 dev->fmt_cap_rect = fmt;
958 tpg_s_buf_height(&dev->tpg, fmt.height);
959 } else if (dev->has_compose_cap) {
960 struct v4l2_rect fmt = dev->fmt_cap_rect;
961
962 v4l2_rect_set_min_size(&fmt, &s->r);
963 if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
964 vb2_is_busy(&dev->vb_vid_cap_q))
965 return -EBUSY;
966 dev->fmt_cap_rect = fmt;
967 tpg_s_buf_height(&dev->tpg, fmt.height);
968 v4l2_rect_set_size_to(compose, &s->r);
969 v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
970 } else {
971 if (!v4l2_rect_same_size(&s->r, &dev->fmt_cap_rect) &&
972 vb2_is_busy(&dev->vb_vid_cap_q))
973 return -EBUSY;
974 v4l2_rect_set_size_to(&dev->fmt_cap_rect, &s->r);
975 v4l2_rect_set_size_to(compose, &s->r);
976 v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
977 tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height);
978 }
979 s->r.top *= factor;
980 s->r.height *= factor;
981 *crop = s->r;
982 break;
983 case V4L2_SEL_TGT_COMPOSE:
984 if (!dev->has_compose_cap)
985 return -EINVAL;
986 ret = vivid_vid_adjust_sel(s->flags, &s->r);
987 if (ret)
988 return ret;
989 v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
990 v4l2_rect_set_max_size(&s->r, &dev->fmt_cap_rect);
991 if (dev->has_scaler_cap) {
992 struct v4l2_rect max_rect = {
993 0, 0,
994 dev->src_rect.width * MAX_ZOOM,
995 (dev->src_rect.height / factor) * MAX_ZOOM
996 };
997
998 v4l2_rect_set_max_size(&s->r, &max_rect);
999 if (dev->has_crop_cap) {
1000 struct v4l2_rect min_rect = {
1001 0, 0,
1002 s->r.width / MAX_ZOOM,
1003 (s->r.height * factor) / MAX_ZOOM
1004 };
1005 struct v4l2_rect max_rect = {
1006 0, 0,
1007 s->r.width * MAX_ZOOM,
1008 (s->r.height * factor) * MAX_ZOOM
1009 };
1010
1011 v4l2_rect_set_min_size(crop, &min_rect);
1012 v4l2_rect_set_max_size(crop, &max_rect);
1013 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
1014 }
1015 } else if (dev->has_crop_cap) {
1016 s->r.top *= factor;
1017 s->r.height *= factor;
1018 v4l2_rect_set_max_size(&s->r, &dev->src_rect);
1019 v4l2_rect_set_size_to(crop, &s->r);
1020 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
1021 s->r.top /= factor;
1022 s->r.height /= factor;
1023 } else {
1024 v4l2_rect_set_size_to(&s->r, &dev->src_rect);
1025 s->r.height /= factor;
1026 }
1027 v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
1028 if (dev->bitmap_cap && (compose->width != s->r.width ||
1029 compose->height != s->r.height)) {
1030 vfree(dev->bitmap_cap);
1031 dev->bitmap_cap = NULL;
1032 }
1033 *compose = s->r;
1034 break;
1035 default:
1036 return -EINVAL;
1037 }
1038
1039 tpg_s_crop_compose(&dev->tpg, crop, compose);
1040 return 0;
1041 }
1042
1043 int vivid_vid_cap_g_pixelaspect(struct file *file, void *priv,
1044 int type, struct v4l2_fract *f)
1045 {
1046 struct vivid_dev *dev = video_drvdata(file);
1047
1048 if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1049 return -EINVAL;
1050
1051 switch (vivid_get_pixel_aspect(dev)) {
1052 case TPG_PIXEL_ASPECT_NTSC:
1053 f->numerator = 11;
1054 f->denominator = 10;
1055 break;
1056 case TPG_PIXEL_ASPECT_PAL:
1057 f->numerator = 54;
1058 f->denominator = 59;
1059 break;
1060 default:
1061 break;
1062 }
1063 return 0;
1064 }
1065
1066 int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv,
1067 struct v4l2_fmtdesc *f)
1068 {
1069 struct vivid_dev *dev = video_drvdata(file);
1070 const struct vivid_fmt *fmt;
1071
1072 if (dev->multiplanar)
1073 return -ENOTTY;
1074
1075 if (f->index >= ARRAY_SIZE(formats_ovl))
1076 return -EINVAL;
1077
1078 fmt = &formats_ovl[f->index];
1079
1080 f->pixelformat = fmt->fourcc;
1081 return 0;
1082 }
1083
1084 int vidioc_g_fmt_vid_overlay(struct file *file, void *priv,
1085 struct v4l2_format *f)
1086 {
1087 struct vivid_dev *dev = video_drvdata(file);
1088 const struct v4l2_rect *compose = &dev->compose_cap;
1089 struct v4l2_window *win = &f->fmt.win;
1090 unsigned clipcount = win->clipcount;
1091
1092 if (dev->multiplanar)
1093 return -ENOTTY;
1094
1095 win->w.top = dev->overlay_cap_top;
1096 win->w.left = dev->overlay_cap_left;
1097 win->w.width = compose->width;
1098 win->w.height = compose->height;
1099 win->field = dev->overlay_cap_field;
1100 win->clipcount = dev->clipcount_cap;
1101 if (clipcount > dev->clipcount_cap)
1102 clipcount = dev->clipcount_cap;
1103 if (dev->bitmap_cap == NULL)
1104 win->bitmap = NULL;
1105 else if (win->bitmap) {
1106 if (copy_to_user(win->bitmap, dev->bitmap_cap,
1107 ((compose->width + 7) / 8) * compose->height))
1108 return -EFAULT;
1109 }
1110 if (clipcount && win->clips)
1111 memcpy(win->clips, dev->clips_cap,
1112 clipcount * sizeof(dev->clips_cap[0]));
1113 return 0;
1114 }
1115
1116 int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
1117 struct v4l2_format *f)
1118 {
1119 struct vivid_dev *dev = video_drvdata(file);
1120 const struct v4l2_rect *compose = &dev->compose_cap;
1121 struct v4l2_window *win = &f->fmt.win;
1122 int i, j;
1123
1124 if (dev->multiplanar)
1125 return -ENOTTY;
1126
1127 win->w.left = clamp_t(int, win->w.left,
1128 -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
1129 win->w.top = clamp_t(int, win->w.top,
1130 -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
1131 win->w.width = compose->width;
1132 win->w.height = compose->height;
1133 if (win->field != V4L2_FIELD_BOTTOM && win->field != V4L2_FIELD_TOP)
1134 win->field = V4L2_FIELD_ANY;
1135 win->chromakey = 0;
1136 win->global_alpha = 0;
1137 if (win->clipcount && !win->clips)
1138 win->clipcount = 0;
1139 if (win->clipcount > MAX_CLIPS)
1140 win->clipcount = MAX_CLIPS;
1141 if (win->clipcount) {
1142 memcpy(dev->try_clips_cap, win->clips,
1143 win->clipcount * sizeof(dev->clips_cap[0]));
1144 for (i = 0; i < win->clipcount; i++) {
1145 struct v4l2_rect *r = &dev->try_clips_cap[i].c;
1146
1147 r->top = clamp_t(s32, r->top, 0, dev->fb_cap.fmt.height - 1);
1148 r->height = clamp_t(s32, r->height, 1, dev->fb_cap.fmt.height - r->top);
1149 r->left = clamp_t(u32, r->left, 0, dev->fb_cap.fmt.width - 1);
1150 r->width = clamp_t(u32, r->width, 1, dev->fb_cap.fmt.width - r->left);
1151 }
1152 /*
1153 * Yeah, so sue me, it's an O(n^2) algorithm. But n is a small
1154 * number and it's typically a one-time deal.
1155 */
1156 for (i = 0; i < win->clipcount - 1; i++) {
1157 struct v4l2_rect *r1 = &dev->try_clips_cap[i].c;
1158
1159 for (j = i + 1; j < win->clipcount; j++) {
1160 struct v4l2_rect *r2 = &dev->try_clips_cap[j].c;
1161
1162 if (v4l2_rect_overlap(r1, r2))
1163 return -EINVAL;
1164 }
1165 }
1166 memcpy(win->clips, dev->try_clips_cap,
1167 win->clipcount * sizeof(dev->clips_cap[0]));
1168 }
1169 return 0;
1170 }
1171
1172 int vidioc_s_fmt_vid_overlay(struct file *file, void *priv,
1173 struct v4l2_format *f)
1174 {
1175 struct vivid_dev *dev = video_drvdata(file);
1176 const struct v4l2_rect *compose = &dev->compose_cap;
1177 struct v4l2_window *win = &f->fmt.win;
1178 int ret = vidioc_try_fmt_vid_overlay(file, priv, f);
1179 unsigned bitmap_size = ((compose->width + 7) / 8) * compose->height;
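/* The overlay bitmap stores one bit per pixel, each line rounded up to whole bytes. */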
1180 unsigned clips_size = win->clipcount * sizeof(dev->clips_cap[0]);
1181 void *new_bitmap = NULL;
1182
1183 if (ret)
1184 return ret;
1185
1186 if (win->bitmap) {
1187 new_bitmap = vzalloc(bitmap_size);
1188
1189 if (new_bitmap == NULL)
1190 return -ENOMEM;
1191 if (copy_from_user(new_bitmap, win->bitmap, bitmap_size)) {
1192 vfree(new_bitmap);
1193 return -EFAULT;
1194 }
1195 }
1196
1197 dev->overlay_cap_top = win->w.top;
1198 dev->overlay_cap_left = win->w.left;
1199 dev->overlay_cap_field = win->field;
1200 vfree(dev->bitmap_cap);
1201 dev->bitmap_cap = new_bitmap;
1202 dev->clipcount_cap = win->clipcount;
1203 if (dev->clipcount_cap)
1204 memcpy(dev->clips_cap, dev->try_clips_cap, clips_size);
1205 return 0;
1206 }
1207
1208 int vivid_vid_cap_overlay(struct file *file, void *fh, unsigned i)
1209 {
1210 struct vivid_dev *dev = video_drvdata(file);
1211
1212 if (dev->multiplanar)
1213 return -ENOTTY;
1214
1215 if (i && dev->fb_vbase_cap == NULL)
1216 return -EINVAL;
1217
1218 if (i && dev->fb_cap.fmt.pixelformat != dev->fmt_cap->fourcc) {
1219 dprintk(dev, 1, "mismatch between overlay and video capture pixelformats\n");
1220 return -EINVAL;
1221 }
1222
1223 if (dev->overlay_cap_owner && dev->overlay_cap_owner != fh)
1224 return -EBUSY;
1225 dev->overlay_cap_owner = i ? fh : NULL;
1226 return 0;
1227 }
1228
1229 int vivid_vid_cap_g_fbuf(struct file *file, void *fh,
1230 struct v4l2_framebuffer *a)
1231 {
1232 struct vivid_dev *dev = video_drvdata(file);
1233
1234 if (dev->multiplanar)
1235 return -ENOTTY;
1236
1237 *a = dev->fb_cap;
1238 a->capability = V4L2_FBUF_CAP_BITMAP_CLIPPING |
1239 V4L2_FBUF_CAP_LIST_CLIPPING;
1240 a->flags = V4L2_FBUF_FLAG_PRIMARY;
1241 a->fmt.field = V4L2_FIELD_NONE;
1242 a->fmt.colorspace = V4L2_COLORSPACE_SRGB;
1243 a->fmt.priv = 0;
1244 return 0;
1245 }
1246
1247 int vivid_vid_cap_s_fbuf(struct file *file, void *fh,
1248 const struct v4l2_framebuffer *a)
1249 {
1250 struct vivid_dev *dev = video_drvdata(file);
1251 const struct vivid_fmt *fmt;
1252
1253 if (dev->multiplanar)
1254 return -ENOTTY;
1255
1256 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
1257 return -EPERM;
1258
1259 if (dev->overlay_cap_owner)
1260 return -EBUSY;
1261
1262 if (a->base == NULL) {
1263 dev->fb_cap.base = NULL;
1264 dev->fb_vbase_cap = NULL;
1265 return 0;
1266 }
1267
1268 if (a->fmt.width < 48 || a->fmt.height < 32)
1269 return -EINVAL;
1270 fmt = vivid_get_format(dev, a->fmt.pixelformat);
1271 if (!fmt || !fmt->can_do_overlay)
1272 return -EINVAL;
1273 if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8)
1274 return -EINVAL;
1275 if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage)
1276 return -EINVAL;
1277
1278 dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base);
1279 dev->fb_cap = *a;
1280 dev->overlay_cap_left = clamp_t(int, dev->overlay_cap_left,
1281 -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
1282 dev->overlay_cap_top = clamp_t(int, dev->overlay_cap_top,
1283 -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
1284 return 0;
1285 }
1286
1287 static const struct v4l2_audio vivid_audio_inputs[] = {
1288 { 0, "TV", V4L2_AUDCAP_STEREO },
1289 { 1, "Line-In", V4L2_AUDCAP_STEREO },
1290 };
1291
1292 int vidioc_enum_input(struct file *file, void *priv,
1293 struct v4l2_input *inp)
1294 {
1295 struct vivid_dev *dev = video_drvdata(file);
1296
1297 if (inp->index >= dev->num_inputs)
1298 return -EINVAL;
1299
1300 inp->type = V4L2_INPUT_TYPE_CAMERA;
1301 switch (dev->input_type[inp->index]) {
1302 case WEBCAM:
1303 snprintf(inp->name, sizeof(inp->name), "Webcam %u",
1304 dev->input_name_counter[inp->index]);
1305 inp->capabilities = 0;
1306 break;
1307 case TV:
1308 snprintf(inp->name, sizeof(inp->name), "TV %u",
1309 dev->input_name_counter[inp->index]);
1310 inp->type = V4L2_INPUT_TYPE_TUNER;
1311 inp->std = V4L2_STD_ALL;
1312 if (dev->has_audio_inputs)
1313 inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
1314 inp->capabilities = V4L2_IN_CAP_STD;
1315 break;
1316 case SVID:
1317 snprintf(inp->name, sizeof(inp->name), "S-Video %u",
1318 dev->input_name_counter[inp->index]);
1319 inp->std = V4L2_STD_ALL;
1320 if (dev->has_audio_inputs)
1321 inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
1322 inp->capabilities = V4L2_IN_CAP_STD;
1323 break;
1324 case HDMI:
1325 snprintf(inp->name, sizeof(inp->name), "HDMI %u",
1326 dev->input_name_counter[inp->index]);
1327 inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
1328 if (dev->edid_blocks == 0 ||
1329 dev->dv_timings_signal_mode[dev->input] == NO_SIGNAL)
1330 inp->status |= V4L2_IN_ST_NO_SIGNAL;
1331 else if (dev->dv_timings_signal_mode[dev->input] == NO_LOCK ||
1332 dev->dv_timings_signal_mode[dev->input] == OUT_OF_RANGE)
1333 inp->status |= V4L2_IN_ST_NO_H_LOCK;
1334 break;
1335 }
1336 if (dev->sensor_hflip)
1337 inp->status |= V4L2_IN_ST_HFLIP;
1338 if (dev->sensor_vflip)
1339 inp->status |= V4L2_IN_ST_VFLIP;
1340 if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) {
1341 if (dev->std_signal_mode[dev->input] == NO_SIGNAL) {
1342 inp->status |= V4L2_IN_ST_NO_SIGNAL;
1343 } else if (dev->std_signal_mode[dev->input] == NO_LOCK) {
1344 inp->status |= V4L2_IN_ST_NO_H_LOCK;
1345 } else if (vivid_is_tv_cap(dev)) {
1346 switch (tpg_g_quality(&dev->tpg)) {
1347 case TPG_QUAL_GRAY:
1348 inp->status |= V4L2_IN_ST_COLOR_KILL;
1349 break;
1350 case TPG_QUAL_NOISE:
1351 inp->status |= V4L2_IN_ST_NO_H_LOCK;
1352 break;
1353 default:
1354 break;
1355 }
1356 }
1357 }
1358 return 0;
1359 }
1360
1361 int vidioc_g_input(struct file *file, void *priv, unsigned *i)
1362 {
1363 struct vivid_dev *dev = video_drvdata(file);
1364
1365 *i = dev->input;
1366 return 0;
1367 }
1368
1369 int vidioc_s_input(struct file *file, void *priv, unsigned i)
1370 {
1371 struct vivid_dev *dev = video_drvdata(file);
1372 struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
1373 unsigned brightness;
1374
1375 if (i >= dev->num_inputs)
1376 return -EINVAL;
1377
1378 if (i == dev->input)
1379 return 0;
1380
1381 if (vb2_is_busy(&dev->vb_vid_cap_q) ||
1382 vb2_is_busy(&dev->vb_vbi_cap_q) ||
1383 vb2_is_busy(&dev->vb_meta_cap_q))
1384 return -EBUSY;
1385
1386 dev->input = i;
1387 dev->vid_cap_dev.tvnorms = 0;
1388 if (dev->input_type[i] == TV || dev->input_type[i] == SVID) {
1389 dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1;
1390 dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
1391 }
1392 dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
1393 dev->meta_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
1394 vivid_update_format_cap(dev, false);
1395
1396 if (dev->colorspace) {
1397 switch (dev->input_type[i]) {
1398 case WEBCAM:
1399 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
1400 break;
1401 case TV:
1402 case SVID:
1403 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
1404 break;
1405 case HDMI:
1406 if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
1407 if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
1408 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
1409 else
1410 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
1411 } else {
1412 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
1413 }
1414 break;
1415 }
1416 }
1417
1418 /*
1419 * Modify the brightness range depending on the input.
1420 * This makes it easy to use vivid to test whether applications can
1421 * handle control range modifications, and it also mirrors practice:
1422 * different inputs may be hooked up to different receivers with
1423 * different control ranges.
1424 */
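/*
 * For example, input 0 keeps the default 0-255 range while input 2 is
 * shifted to 256-511 with a default of 384.
 */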
1425 brightness = 128 * i + dev->input_brightness[i];
1426 v4l2_ctrl_modify_range(dev->brightness,
1427 128 * i, 255 + 128 * i, 1, 128 + 128 * i);
1428 v4l2_ctrl_s_ctrl(dev->brightness, brightness);
1429
1430 /* Restore per-input states. */
1431 v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode,
1432 vivid_is_hdmi_cap(dev));
1433 v4l2_ctrl_activate(dev->ctrl_dv_timings, vivid_is_hdmi_cap(dev) &&
1434 dev->dv_timings_signal_mode[dev->input] ==
1435 SELECTED_DV_TIMINGS);
1436 v4l2_ctrl_activate(dev->ctrl_std_signal_mode, vivid_is_sdtv_cap(dev));
1437 v4l2_ctrl_activate(dev->ctrl_standard, vivid_is_sdtv_cap(dev) &&
1438 dev->std_signal_mode[dev->input]);
1439
1440 if (vivid_is_hdmi_cap(dev)) {
1441 v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings_signal_mode,
1442 dev->dv_timings_signal_mode[dev->input]);
1443 v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings,
1444 dev->query_dv_timings[dev->input]);
1445 } else if (vivid_is_sdtv_cap(dev)) {
1446 v4l2_ctrl_s_ctrl(dev->ctrl_std_signal_mode,
1447 dev->std_signal_mode[dev->input]);
1448 v4l2_ctrl_s_ctrl(dev->ctrl_standard,
1449 dev->std_signal_mode[dev->input]);
1450 }
1451
1452 return 0;
1453 }
1454
1455 int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
1456 {
1457 if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
1458 return -EINVAL;
1459 *vin = vivid_audio_inputs[vin->index];
1460 return 0;
1461 }
1462
1463 int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
1464 {
1465 struct vivid_dev *dev = video_drvdata(file);
1466
1467 if (!vivid_is_sdtv_cap(dev))
1468 return -EINVAL;
1469 *vin = vivid_audio_inputs[dev->tv_audio_input];
1470 return 0;
1471 }
1472
1473 int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
1474 {
1475 struct vivid_dev *dev = video_drvdata(file);
1476
1477 if (!vivid_is_sdtv_cap(dev))
1478 return -EINVAL;
1479 if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
1480 return -EINVAL;
1481 dev->tv_audio_input = vin->index;
1482 return 0;
1483 }
1484
1485 int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
1486 {
1487 struct vivid_dev *dev = video_drvdata(file);
1488
1489 if (vf->tuner != 0)
1490 return -EINVAL;
1491 vf->frequency = dev->tv_freq;
1492 return 0;
1493 }
1494
1495 int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
1496 {
1497 struct vivid_dev *dev = video_drvdata(file);
1498
1499 if (vf->tuner != 0)
1500 return -EINVAL;
1501 dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ);
1502 if (vivid_is_tv_cap(dev))
1503 vivid_update_quality(dev);
1504 return 0;
1505 }
1506
1507 int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
1508 {
1509 struct vivid_dev *dev = video_drvdata(file);
1510
1511 if (vt->index != 0)
1512 return -EINVAL;
1513 if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2)
1514 return -EINVAL;
1515 dev->tv_audmode = vt->audmode;
1516 return 0;
1517 }
1518
1519 int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
1520 {
1521 struct vivid_dev *dev = video_drvdata(file);
1522 enum tpg_quality qual;
1523
1524 if (vt->index != 0)
1525 return -EINVAL;
1526
1527 vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
1528 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
1529 vt->audmode = dev->tv_audmode;
1530 vt->rangelow = MIN_TV_FREQ;
1531 vt->rangehigh = MAX_TV_FREQ;
1532 qual = vivid_get_quality(dev, &vt->afc);
1533 if (qual == TPG_QUAL_COLOR)
1534 vt->signal = 0xffff;
1535 else if (qual == TPG_QUAL_GRAY)
1536 vt->signal = 0x8000;
1537 else
1538 vt->signal = 0;
1539 if (qual == TPG_QUAL_NOISE) {
1540 vt->rxsubchans = 0;
1541 } else if (qual == TPG_QUAL_GRAY) {
1542 vt->rxsubchans = V4L2_TUNER_SUB_MONO;
1543 } else {
1544 unsigned int channel_nr = dev->tv_freq / (6 * 16);
1545 unsigned int options =
1546 (dev->std_cap[dev->input] & V4L2_STD_NTSC_M) ? 4 : 3;
1547
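/*
 * Each fake 6 MHz channel (96 steps of 1/16 MHz) cycles through the possible
 * audio subchannel combinations below, so neighbouring channels report
 * different mono/stereo/SAP/bilingual capabilities for testing purposes.
 */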
1548 switch (channel_nr % options) {
1549 case 0:
1550 vt->rxsubchans = V4L2_TUNER_SUB_MONO;
1551 break;
1552 case 1:
1553 vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
1554 break;
1555 case 2:
1556 if (dev->std_cap[dev->input] & V4L2_STD_NTSC_M)
1557 vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP;
1558 else
1559 vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
1560 break;
1561 case 3:
1562 vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP;
1563 break;
1564 }
1565 }
1566 strscpy(vt->name, "TV Tuner", sizeof(vt->name));
1567 return 0;
1568 }
1569
1570 /* Must remain in sync with the vivid_ctrl_standard_strings array */
1571 const v4l2_std_id vivid_standard[] = {
1572 V4L2_STD_NTSC_M,
1573 V4L2_STD_NTSC_M_JP,
1574 V4L2_STD_NTSC_M_KR,
1575 V4L2_STD_NTSC_443,
1576 V4L2_STD_PAL_BG | V4L2_STD_PAL_H,
1577 V4L2_STD_PAL_I,
1578 V4L2_STD_PAL_DK,
1579 V4L2_STD_PAL_M,
1580 V4L2_STD_PAL_N,
1581 V4L2_STD_PAL_Nc,
1582 V4L2_STD_PAL_60,
1583 V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
1584 V4L2_STD_SECAM_DK,
1585 V4L2_STD_SECAM_L,
1586 V4L2_STD_SECAM_LC,
1587 V4L2_STD_UNKNOWN
1588 };
1589
1590 /* Must remain in sync with the vivid_standard array */
1591 const char * const vivid_ctrl_standard_strings[] = {
1592 "NTSC-M",
1593 "NTSC-M-JP",
1594 "NTSC-M-KR",
1595 "NTSC-443",
1596 "PAL-BGH",
1597 "PAL-I",
1598 "PAL-DK",
1599 "PAL-M",
1600 "PAL-N",
1601 "PAL-Nc",
1602 "PAL-60",
1603 "SECAM-BGH",
1604 "SECAM-DK",
1605 "SECAM-L",
1606 "SECAM-Lc",
1607 NULL,
1608 };
1609
1610 int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id)
1611 {
1612 struct vivid_dev *dev = video_drvdata(file);
1613 unsigned int last = dev->query_std_last[dev->input];
1614
1615 if (!vivid_is_sdtv_cap(dev))
1616 return -ENODATA;
1617 if (dev->std_signal_mode[dev->input] == NO_SIGNAL ||
1618 dev->std_signal_mode[dev->input] == NO_LOCK) {
1619 *id = V4L2_STD_UNKNOWN;
1620 return 0;
1621 }
1622 if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) {
1623 *id = V4L2_STD_UNKNOWN;
1624 } else if (dev->std_signal_mode[dev->input] == CURRENT_STD) {
1625 *id = dev->std_cap[dev->input];
1626 } else if (dev->std_signal_mode[dev->input] == SELECTED_STD) {
1627 *id = dev->query_std[dev->input];
1628 } else {
1629 *id = vivid_standard[last];
1630 dev->query_std_last[dev->input] =
1631 (last + 1) % ARRAY_SIZE(vivid_standard);
1632 }
1633
1634 return 0;
1635 }
1636
1637 int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id)
1638 {
1639 struct vivid_dev *dev = video_drvdata(file);
1640
1641 if (!vivid_is_sdtv_cap(dev))
1642 return -ENODATA;
1643 if (dev->std_cap[dev->input] == id)
1644 return 0;
1645 if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
1646 return -EBUSY;
1647 dev->std_cap[dev->input] = id;
1648 vivid_update_format_cap(dev, false);
1649 return 0;
1650 }
1651
1652 static void find_aspect_ratio(u32 width, u32 height,
1653 u32 *num, u32 *denom)
1654 {
1655 if (!(height % 3) && ((height * 4 / 3) == width)) {
1656 *num = 4;
1657 *denom = 3;
1658 } else if (!(height % 9) && ((height * 16 / 9) == width)) {
1659 *num = 16;
1660 *denom = 9;
1661 } else if (!(height % 10) && ((height * 16 / 10) == width)) {
1662 *num = 16;
1663 *denom = 10;
1664 } else if (!(height % 4) && ((height * 5 / 4) == width)) {
1665 *num = 5;
1666 *denom = 4;
1667 } else if (!(height % 9) && ((height * 15 / 9) == width)) {
1668 *num = 15;
1669 *denom = 9;
1670 } else { /* default to 16:9 */
1671 *num = 16;
1672 *denom = 9;
1673 }
1674 }
1675
1676 static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
1677 {
1678 struct v4l2_bt_timings *bt = &timings->bt;
1679 u32 total_h_pixel;
1680 u32 total_v_lines;
1681 u32 h_freq;
1682
1683 if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap,
1684 NULL, NULL))
1685 return false;
1686
1687 total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt);
1688 total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt);
1689
1690 h_freq = (u32)bt->pixelclock / total_h_pixel;
1691
1692 if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
1693 if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width,
1694 bt->polarities, bt->interlaced, timings))
1695 return true;
1696 }
1697
1698 if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) {
1699 struct v4l2_fract aspect_ratio;
1700
1701 find_aspect_ratio(bt->width, bt->height,
1702 &aspect_ratio.numerator,
1703 &aspect_ratio.denominator);
1704 if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync,
1705 bt->polarities, bt->interlaced,
1706 aspect_ratio, timings))
1707 return true;
1708 }
1709 return false;
1710 }
1711
1712 int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
1713 struct v4l2_dv_timings *timings)
1714 {
1715 struct vivid_dev *dev = video_drvdata(file);
1716
1717 if (!vivid_is_hdmi_cap(dev))
1718 return -ENODATA;
1719 if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
1720 0, NULL, NULL) &&
1721 !valid_cvt_gtf_timings(timings))
1722 return -EINVAL;
1723
1724 if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap[dev->input],
1725 0, false))
1726 return 0;
1727 if (vb2_is_busy(&dev->vb_vid_cap_q))
1728 return -EBUSY;
1729
1730 dev->dv_timings_cap[dev->input] = *timings;
1731 vivid_update_format_cap(dev, false);
1732 return 0;
1733 }
1734
1735 int vidioc_query_dv_timings(struct file *file, void *_fh,
1736 struct v4l2_dv_timings *timings)
1737 {
1738 struct vivid_dev *dev = video_drvdata(file);
1739 unsigned int input = dev->input;
1740 unsigned int last = dev->query_dv_timings_last[input];
1741
1742 if (!vivid_is_hdmi_cap(dev))
1743 return -ENODATA;
1744 if (dev->dv_timings_signal_mode[input] == NO_SIGNAL ||
1745 dev->edid_blocks == 0)
1746 return -ENOLINK;
1747 if (dev->dv_timings_signal_mode[input] == NO_LOCK)
1748 return -ENOLCK;
1749 if (dev->dv_timings_signal_mode[input] == OUT_OF_RANGE) {
1750 timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2;
1751 return -ERANGE;
1752 }
1753 if (dev->dv_timings_signal_mode[input] == CURRENT_DV_TIMINGS) {
1754 *timings = dev->dv_timings_cap[input];
1755 } else if (dev->dv_timings_signal_mode[input] ==
1756 SELECTED_DV_TIMINGS) {
1757 *timings =
1758 v4l2_dv_timings_presets[dev->query_dv_timings[input]];
1759 } else {
1760 *timings =
1761 v4l2_dv_timings_presets[last];
1762 dev->query_dv_timings_last[input] =
1763 (last + 1) % dev->query_dv_timings_size;
1764 }
1765 return 0;
1766 }
1767
1768 int vidioc_s_edid(struct file *file, void *_fh,
1769 struct v4l2_edid *edid)
1770 {
1771 struct vivid_dev *dev = video_drvdata(file);
1772 u16 phys_addr;
1773 u32 display_present = 0;
1774 unsigned int i, j;
1775 int ret;
1776
1777 memset(edid->reserved, 0, sizeof(edid->reserved));
1778 if (edid->pad >= dev->num_inputs)
1779 return -EINVAL;
1780 if (dev->input_type[edid->pad] != HDMI || edid->start_block)
1781 return -EINVAL;
1782 if (edid->blocks == 0) {
1783 dev->edid_blocks = 0;
1784 v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0);
1785 v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0);
1786 phys_addr = CEC_PHYS_ADDR_INVALID;
1787 goto set_phys_addr;
1788 }
1789 if (edid->blocks > dev->edid_max_blocks) {
1790 edid->blocks = dev->edid_max_blocks;
1791 return -E2BIG;
1792 }
1793 phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
1794 ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL);
1795 if (ret)
1796 return ret;
1797
1798 if (vb2_is_busy(&dev->vb_vid_cap_q))
1799 return -EBUSY;
1800
1801 dev->edid_blocks = edid->blocks;
1802 memcpy(dev->edid, edid->edid, edid->blocks * 128);
1803
1804 for (i = 0, j = 0; i < dev->num_outputs; i++)
1805 if (dev->output_type[i] == HDMI)
1806 display_present |=
1807 dev->display_present[i] << j++;
1808
1809 v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present);
1810 v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present);
1811
1812 set_phys_addr:
1813 /* TODO: a proper hotplug detect cycle should be emulated here */
1814 cec_s_phys_addr(dev->cec_rx_adap, phys_addr, false);
1815
1816 for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
1817 cec_s_phys_addr(dev->cec_tx_adap[i],
1818 dev->display_present[i] ?
1819 v4l2_phys_addr_for_input(phys_addr, i + 1) :
1820 CEC_PHYS_ADDR_INVALID,
1821 false);
1822 return 0;
1823 }
1824
1825 int vidioc_enum_framesizes(struct file *file, void *fh,
1826 struct v4l2_frmsizeenum *fsize)
1827 {
1828 struct vivid_dev *dev = video_drvdata(file);
1829
1830 if (!vivid_is_webcam(dev) && !dev->has_scaler_cap)
1831 return -EINVAL;
1832 if (vivid_get_format(dev, fsize->pixel_format) == NULL)
1833 return -EINVAL;
1834 if (vivid_is_webcam(dev)) {
1835 if (fsize->index >= ARRAY_SIZE(webcam_sizes))
1836 return -EINVAL;
1837 fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1838 fsize->discrete = webcam_sizes[fsize->index];
1839 return 0;
1840 }
1841 if (fsize->index)
1842 return -EINVAL;
1843 fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
1844 fsize->stepwise.min_width = MIN_WIDTH;
1845 fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM;
1846 fsize->stepwise.step_width = 2;
1847 fsize->stepwise.min_height = MIN_HEIGHT;
1848 fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM;
1849 fsize->stepwise.step_height = 2;
1850 return 0;
1851 }
1852
1853 /* timeperframe is arbitrary and continuous */
1854 int vidioc_enum_frameintervals(struct file *file, void *priv,
1855 struct v4l2_frmivalenum *fival)
1856 {
1857 struct vivid_dev *dev = video_drvdata(file);
1858 const struct vivid_fmt *fmt;
1859 int i;
1860
1861 fmt = vivid_get_format(dev, fival->pixel_format);
1862 if (!fmt)
1863 return -EINVAL;
1864
1865 if (!vivid_is_webcam(dev)) {
1866 if (fival->index)
1867 return -EINVAL;
1868 if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM)
1869 return -EINVAL;
1870 if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM)
1871 return -EINVAL;
1872 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1873 fival->discrete = dev->timeperframe_vid_cap;
1874 return 0;
1875 }
1876
1877 for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
1878 if (fival->width == webcam_sizes[i].width &&
1879 fival->height == webcam_sizes[i].height)
1880 break;
1881 if (i == ARRAY_SIZE(webcam_sizes))
1882 return -EINVAL;
1883 if (fival->index >= 2 * (VIVID_WEBCAM_SIZES - i))
1884 return -EINVAL;
1885 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1886 fival->discrete = webcam_intervals[fival->index];
1887 return 0;
1888 }
1889
1890 int vivid_vid_cap_g_parm(struct file *file, void *priv,
1891 struct v4l2_streamparm *parm)
1892 {
1893 struct vivid_dev *dev = video_drvdata(file);
1894
1895 if (parm->type != (dev->multiplanar ?
1896 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
1897 V4L2_BUF_TYPE_VIDEO_CAPTURE))
1898 return -EINVAL;
1899
1900 parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
1901 parm->parm.capture.timeperframe = dev->timeperframe_vid_cap;
1902 parm->parm.capture.readbuffers = 1;
1903 return 0;
1904 }
1905
1906 int vivid_vid_cap_s_parm(struct file *file, void *priv,
1907 struct v4l2_streamparm *parm)
1908 {
1909 struct vivid_dev *dev = video_drvdata(file);
1910 unsigned ival_sz = 2 * (VIVID_WEBCAM_SIZES - dev->webcam_size_idx);
1911 struct v4l2_fract tpf;
1912 unsigned i;
1913
1914 if (parm->type != (dev->multiplanar ?
1915 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
1916 V4L2_BUF_TYPE_VIDEO_CAPTURE))
1917 return -EINVAL;
1918 if (!vivid_is_webcam(dev))
1919 return vivid_vid_cap_g_parm(file, priv, parm);
1920
1921 tpf = parm->parm.capture.timeperframe;
1922
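/* A zero denominator selects the shortest interval allowed for this size. */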
1923 if (tpf.denominator == 0)
1924 tpf = webcam_intervals[ival_sz - 1];
1925 for (i = 0; i < ival_sz; i++)
1926 if (V4L2_FRACT_COMPARE(tpf, >=, webcam_intervals[i]))
1927 break;
1928 if (i == ival_sz)
1929 i = ival_sz - 1;
1930 dev->webcam_ival_idx = i;
1931 tpf = webcam_intervals[dev->webcam_ival_idx];
1932
1933 /* resync the thread's timings */
1934 dev->cap_seq_resync = true;
1935 dev->timeperframe_vid_cap = tpf;
1936 parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
1937 parm->parm.capture.timeperframe = tpf;
1938 parm->parm.capture.readbuffers = 1;
1939 return 0;
1940 }
1941