/*
 * Samsung TV Mixer driver
 *
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *
 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 2 of the License,
 * or (at your option) any later version
 */

#include "mixer.h"

#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/timer.h>
#include <media/videobuf2-dma-contig.h>

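/*
 * Callback for driver_for_each_device(): stores the subdev kept in the
 * device's driver data and returns a non-zero value so that iteration
 * stops after the first device.
 */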
static int find_reg_callback(struct device *dev, void *p)
{
	struct v4l2_subdev **sd = p;

	*sd = dev_get_drvdata(dev);
	/* non-zero value stops iteration */
	return 1;
}

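/*
 * Looks up the platform driver named module_name, takes the v4l2_subdev
 * provided by its first device and registers it with the mixer's
 * v4l2_device.  Returns NULL if the driver is missing, exposes no subdev
 * or the registration fails.
 */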
static struct v4l2_subdev *find_and_register_subdev(
	struct mxr_device *mdev, char *module_name)
{
	struct device_driver *drv;
	struct v4l2_subdev *sd = NULL;
	int ret;

	/* TODO: add waiting until probe is finished */
	drv = driver_find(module_name, &platform_bus_type);
	if (!drv) {
		mxr_warn(mdev, "module %s is missing\n", module_name);
		return NULL;
	}
	/* driver refcnt is increased, it is safe to iterate over devices */
	ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
	/* ret == 0 means that find_reg_callback was never executed */
	if (sd == NULL) {
		mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
		goto done;
	}
	/* v4l2_device_register_subdev detects if sd is NULL */
	ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
	if (ret) {
		mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
		sd = NULL;
	}

done:
	return sd;
}

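/*
 * Sets up the V4L2 context of the mixer: registers the v4l2_device,
 * creates the vb2 DMA-contig allocator context and registers every
 * output listed in output_conf.  At least one output must register
 * successfully, otherwise the whole setup is rolled back.
 */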
int __devinit mxr_acquire_video(struct mxr_device *mdev,
	struct mxr_output_conf *output_conf, int output_count)
{
	struct device *dev = mdev->dev;
	struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
	int i;
	int ret = 0;
	struct v4l2_subdev *sd;

	strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
	/* prepare context for V4L2 device */
	ret = v4l2_device_register(dev, v4l2_dev);
	if (ret) {
		mxr_err(mdev, "could not register v4l2 device.\n");
		goto fail;
	}

	mdev->alloc_ctx = vb2_dma_contig_init_ctx(mdev->dev);
	if (IS_ERR_OR_NULL(mdev->alloc_ctx)) {
		mxr_err(mdev, "could not acquire vb2 allocator\n");
		goto fail_v4l2_dev;
	}

	/* registering outputs */
	mdev->output_cnt = 0;
	for (i = 0; i < output_count; ++i) {
		struct mxr_output_conf *conf = &output_conf[i];
		struct mxr_output *out;

		sd = find_and_register_subdev(mdev, conf->module_name);
		/* trying to register next output */
		if (sd == NULL)
			continue;
		out = kzalloc(sizeof *out, GFP_KERNEL);
		if (out == NULL) {
			mxr_err(mdev, "no memory for '%s'\n",
				conf->output_name);
			ret = -ENOMEM;
			/* registered subdevs are removed in fail_v4l2_dev */
			goto fail_output;
		}
		strlcpy(out->name, conf->output_name, sizeof(out->name));
		out->sd = sd;
		out->cookie = conf->cookie;
		mdev->output[mdev->output_cnt++] = out;
		mxr_info(mdev, "added output '%s' from module '%s'\n",
			conf->output_name, conf->module_name);
		/* checking if maximal number of outputs is reached */
		if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
			break;
	}

	if (mdev->output_cnt == 0) {
		mxr_err(mdev, "failed to register any output\n");
		ret = -ENODEV;
		/* skipping fail_output because there is nothing to free */
		goto fail_vb2_allocator;
	}

	return 0;

fail_output:
	/* kfree is NULL-safe */
	for (i = 0; i < mdev->output_cnt; ++i)
		kfree(mdev->output[i]);
	memset(mdev->output, 0, sizeof mdev->output);

fail_vb2_allocator:
	/* freeing allocator context */
	vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);

fail_v4l2_dev:
	/* NOTE: automatically unregisters all subdevs */
	v4l2_device_unregister(v4l2_dev);

fail:
	return ret;
}

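/*
 * Releases everything acquired in mxr_acquire_video(): the output
 * descriptors, the vb2 allocator context and the v4l2_device.
 */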
void __devexit mxr_release_video(struct mxr_device *mdev)
{
	int i;

	/* kfree is NULL-safe */
	for (i = 0; i < mdev->output_cnt; ++i)
		kfree(mdev->output[i]);

	vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
	v4l2_device_unregister(&mdev->v4l2_dev);
}

static int mxr_querycap(struct file *file, void *priv,
	struct v4l2_capability *cap)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof cap->driver);
	strlcpy(cap->card, layer->vfd.name, sizeof cap->card);
	sprintf(cap->bus_info, "%d", layer->idx);
	cap->version = KERNEL_VERSION(0, 1, 0);
	cap->capabilities = V4L2_CAP_STREAMING |
		V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_MPLANE;

	return 0;
}

static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
{
	mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
		geo->src.full_width, geo->src.full_height);
	mxr_dbg(mdev, "src.size = (%u, %u)\n",
		geo->src.width, geo->src.height);
	mxr_dbg(mdev, "src.offset = (%u, %u)\n",
		geo->src.x_offset, geo->src.y_offset);
	mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
		geo->dst.full_width, geo->dst.full_height);
	mxr_dbg(mdev, "dst.size = (%u, %u)\n",
		geo->dst.width, geo->dst.height);
	mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
		geo->dst.x_offset, geo->dst.y_offset);
	mxr_dbg(mdev, "ratio = (%u, %u)\n",
		geo->x_ratio, geo->y_ratio);
}

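/*
 * Resets the layer geometry to the full size of the current output:
 * both source and destination windows are set to the media bus frame
 * size and then adjusted by the layer's fix_geometry() hook.
 */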
static void mxr_layer_default_geo(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	memset(&layer->geo, 0, sizeof layer->geo);

	mxr_get_mbus_fmt(mdev, &mbus_fmt);

	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.width = layer->geo.dst.full_width;
	layer->geo.dst.height = layer->geo.dst.full_height;
	layer->geo.dst.field = mbus_fmt.field;

	layer->geo.src.full_width = mbus_fmt.width;
	layer->geo.src.full_height = mbus_fmt.height;
	layer->geo.src.width = layer->geo.src.full_width;
	layer->geo.src.height = layer->geo.src.full_height;

	mxr_geometry_dump(mdev, &layer->geo);
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
	mxr_geometry_dump(mdev, &layer->geo);
}

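/*
 * Re-reads the media bus format of the current output and, if it has
 * changed, updates the layer's destination window accordingly.
 */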
static void mxr_layer_update_output(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	mxr_get_mbus_fmt(mdev, &mbus_fmt);
	/* checking if update is needed */
	if (layer->geo.dst.full_width == mbus_fmt.width &&
	    layer->geo.dst.full_height == mbus_fmt.height)
		return;

	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.field = mbus_fmt.field;
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);

	mxr_geometry_dump(mdev, &layer->geo);
}

static const struct mxr_format *find_format_by_fourcc(
	struct mxr_layer *layer, unsigned long fourcc);
static const struct mxr_format *find_format_by_index(
	struct mxr_layer *layer, unsigned long index);

static int mxr_enum_fmt(struct file *file, void *priv,
	struct v4l2_fmtdesc *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	const struct mxr_format *fmt;

	mxr_dbg(mdev, "%s\n", __func__);
	fmt = find_format_by_index(layer, f->index);
	if (fmt == NULL)
		return -EINVAL;

	strlcpy(f->description, fmt->name, sizeof(f->description));
	f->pixelformat = fmt->fourcc;

	return 0;
}

static unsigned int divup(unsigned int dividend, unsigned int divisor)
{
	return (dividend + divisor - 1) / divisor;
}

unsigned long mxr_get_plane_size(const struct mxr_block *blk,
	unsigned int width, unsigned int height)
{
	unsigned int bl_width = divup(width, blk->width);
	unsigned int bl_height = divup(height, blk->height);

	return bl_width * bl_height * blk->size;
}

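/*
 * Fills per-plane sizeimage and bytesperline for the given format and
 * frame size.  Planes that map to the same subframe have their sizes
 * accumulated into a single v4l2_plane_pix_format.
 */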
static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
	const struct mxr_format *fmt, u32 width, u32 height)
{
	int i;

	/* checking if nothing to fill */
	if (!planes)
		return;

	memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
	for (i = 0; i < fmt->num_planes; ++i) {
		struct v4l2_plane_pix_format *plane = planes
			+ fmt->plane2subframe[i];
		const struct mxr_block *blk = &fmt->plane[i];
		u32 bl_width = divup(width, blk->width);
		u32 bl_height = divup(height, blk->height);
		u32 sizeimage = bl_width * bl_height * blk->size;
		u16 bytesperline = bl_width * blk->size / blk->height;

		plane->sizeimage += sizeimage;
		plane->bytesperline = max(plane->bytesperline, bytesperline);
	}
}

static int mxr_g_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	pix->width = layer->geo.src.full_width;
	pix->height = layer->geo.src.full_height;
	pix->field = V4L2_FIELD_NONE;
	pix->pixelformat = layer->fmt->fourcc;
	pix->colorspace = layer->fmt->colorspace;
	mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);

	return 0;
}

static int mxr_s_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	const struct mxr_format *fmt;
	struct v4l2_pix_format_mplane *pix;
	struct mxr_device *mdev = layer->mdev;
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);

	pix = &f->fmt.pix_mp;
	fmt = find_format_by_fourcc(layer, pix->pixelformat);
	if (fmt == NULL) {
		mxr_warn(mdev, "not recognized fourcc: %08x\n",
			pix->pixelformat);
		return -EINVAL;
	}
	layer->fmt = fmt;
	/* set source size to highest accepted value */
	geo->src.full_width = max(geo->dst.full_width, pix->width);
	geo->src.full_height = max(geo->dst.full_height, pix->height);
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
	mxr_geometry_dump(mdev, &layer->geo);
	/* set cropping to total visible screen */
	geo->src.width = pix->width;
	geo->src.height = pix->height;
	geo->src.x_offset = 0;
	geo->src.y_offset = 0;
	/* assure consistency of geometry */
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
	mxr_geometry_dump(mdev, &layer->geo);
	/* set full size to lowest possible value */
	geo->src.full_width = 0;
	geo->src.full_height = 0;
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
	mxr_geometry_dump(mdev, &layer->geo);

	/* returning results */
	mxr_g_fmt(file, priv, f);

	return 0;
}

static int mxr_g_selection(struct file *file, void *fh,
	struct v4l2_selection *s)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return -EINVAL;

	switch (s->target) {
	case V4L2_SEL_TGT_CROP_ACTIVE:
		s->r.left = geo->src.x_offset;
		s->r.top = geo->src.y_offset;
		s->r.width = geo->src.width;
		s->r.height = geo->src.height;
		break;
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = geo->src.full_width;
		s->r.height = geo->src.full_height;
		break;
	case V4L2_SEL_TGT_COMPOSE_ACTIVE:
	case V4L2_SEL_TGT_COMPOSE_PADDED:
		s->r.left = geo->dst.x_offset;
		s->r.top = geo->dst.y_offset;
		s->r.width = geo->dst.width;
		s->r.height = geo->dst.height;
		break;
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = geo->dst.full_width;
		s->r.height = geo->dst.full_height;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* returns 1 if rectangle 'a' is inside 'b' */
static int mxr_is_rect_inside(struct v4l2_rect *a, struct v4l2_rect *b)
{
	if (a->left < b->left)
		return 0;
	if (a->top < b->top)
		return 0;
	if (a->left + a->width > b->left + b->width)
		return 0;
	if (a->top + a->height > b->top + b->height)
		return 0;
	return 1;
}

static int mxr_s_selection(struct file *file, void *fh,
	struct v4l2_selection *s)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_geometry *geo = &layer->geo;
	struct mxr_crop *target = NULL;
	enum mxr_geometry_stage stage;
	struct mxr_geometry tmp;
	struct v4l2_rect res;

	memset(&res, 0, sizeof res);

	mxr_dbg(layer->mdev, "%s: rect: %dx%d@%d,%d\n", __func__,
		s->r.width, s->r.height, s->r.left, s->r.top);

	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return -EINVAL;

	switch (s->target) {
	/* ignore read-only targets */
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		res.width = geo->src.full_width;
		res.height = geo->src.full_height;
		break;

	/* ignore read-only targets */
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		res.width = geo->dst.full_width;
		res.height = geo->dst.full_height;
		break;

	case V4L2_SEL_TGT_CROP_ACTIVE:
		target = &geo->src;
		stage = MXR_GEOMETRY_CROP;
		break;
	case V4L2_SEL_TGT_COMPOSE_ACTIVE:
	case V4L2_SEL_TGT_COMPOSE_PADDED:
		target = &geo->dst;
		stage = MXR_GEOMETRY_COMPOSE;
		break;
	default:
		return -EINVAL;
	}
	/* apply change and update geometry if needed */
	if (target) {
		/* back up the current geometry in case setup fails */
		memcpy(&tmp, geo, sizeof tmp);

		/* apply requested selection */
		target->x_offset = s->r.left;
		target->y_offset = s->r.top;
		target->width = s->r.width;
		target->height = s->r.height;

		layer->ops.fix_geometry(layer, stage, s->flags);

		/* retrieve the updated selection rectangle */
		res.left = target->x_offset;
		res.top = target->y_offset;
		res.width = target->width;
		res.height = target->height;

		mxr_geometry_dump(layer->mdev, &layer->geo);
	}

	/* checking if the rectangle satisfies constraints */
	if ((s->flags & V4L2_SEL_FLAG_LE) && !mxr_is_rect_inside(&res, &s->r))
		goto fail;
	if ((s->flags & V4L2_SEL_FLAG_GE) && !mxr_is_rect_inside(&s->r, &res))
		goto fail;

	/* return result rectangle */
	s->r = res;

	return 0;
fail:
	/* restore old geometry, which is not touched if target is NULL */
	if (target)
		memcpy(geo, &tmp, sizeof tmp);
	return -ERANGE;
}

static int mxr_enum_dv_presets(struct file *file, void *fh,
	struct v4l2_dv_enum_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, enum_dv_presets, preset);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_s_dv_preset(struct file *file, void *fh,
	struct v4l2_dv_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* preset change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_preset, preset);

	mutex_unlock(&mdev->mutex);

	mxr_layer_update_output(layer);

	/* any failure should return EINVAL according to V4L2 doc */
	return ret ? -EINVAL : 0;
}

static int mxr_g_dv_preset(struct file *file, void *fh,
	struct v4l2_dv_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_preset, preset);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_s_std(struct file *file, void *fh, v4l2_std_id *norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* standard change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, *norm);

	mutex_unlock(&mdev->mutex);

	mxr_layer_update_output(layer);

	return ret ? -EINVAL : 0;
}

static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	struct mxr_output *out;
	struct v4l2_subdev *sd;

	if (a->index >= mdev->output_cnt)
		return -EINVAL;
	out = mdev->output[a->index];
	BUG_ON(out == NULL);
	sd = out->sd;
	strlcpy(a->name, out->name, sizeof(a->name));

	/* try to obtain supported tv norms */
	v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
	a->capabilities = 0;
	if (sd->ops->video && sd->ops->video->s_dv_preset)
		a->capabilities |= V4L2_OUT_CAP_PRESETS;
	if (sd->ops->video && sd->ops->video->s_std_output)
		a->capabilities |= V4L2_OUT_CAP_STD;
	a->type = V4L2_OUTPUT_TYPE_ANALOG;

	return 0;
}

static int mxr_s_output(struct file *file, void *fh, unsigned int i)
{
	struct video_device *vfd = video_devdata(file);
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;

	if (i >= mdev->output_cnt || mdev->output[i] == NULL)
		return -EINVAL;

	mutex_lock(&mdev->mutex);
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}
	mdev->current_output = i;
	vfd->tvnorms = 0;
	v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
		&vfd->tvnorms);
	mutex_unlock(&mdev->mutex);

	/* update the layer's geometry */
	mxr_layer_update_output(layer);

	mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);

	return 0;
}

static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;

	mutex_lock(&mdev->mutex);
	*p = mdev->current_output;
	mutex_unlock(&mdev->mutex);

	return 0;
}

static int mxr_reqbufs(struct file *file, void *priv,
	struct v4l2_requestbuffers *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_reqbufs(&layer->vb_queue, p);
}

static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_querybuf(&layer->vb_queue, p);
}

static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
	return vb2_qbuf(&layer->vb_queue, p);
}

static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
}

static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamon(&layer->vb_queue, i);
}

static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamoff(&layer->vb_queue, i);
}

static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
	.vidioc_querycap = mxr_querycap,
	/* format handling */
	.vidioc_enum_fmt_vid_out = mxr_enum_fmt,
	.vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
	.vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
	/* buffer control */
	.vidioc_reqbufs = mxr_reqbufs,
	.vidioc_querybuf = mxr_querybuf,
	.vidioc_qbuf = mxr_qbuf,
	.vidioc_dqbuf = mxr_dqbuf,
	/* Streaming control */
	.vidioc_streamon = mxr_streamon,
	.vidioc_streamoff = mxr_streamoff,
	/* Preset functions */
	.vidioc_enum_dv_presets = mxr_enum_dv_presets,
	.vidioc_s_dv_preset = mxr_s_dv_preset,
	.vidioc_g_dv_preset = mxr_g_dv_preset,
	/* analog TV standard functions */
	.vidioc_s_std = mxr_s_std,
	.vidioc_g_std = mxr_g_std,
	/* Output handling */
	.vidioc_enum_output = mxr_enum_output,
	.vidioc_s_output = mxr_s_output,
	.vidioc_g_output = mxr_g_output,
	/* selection ioctls */
	.vidioc_g_selection = mxr_g_selection,
	.vidioc_s_selection = mxr_s_selection,
};

static int mxr_video_open(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret = 0;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
	/* assure device probe is finished */
	wait_for_device_probe();
	/* creating context for file descriptor */
	ret = v4l2_fh_open(file);
	if (ret) {
		mxr_err(mdev, "v4l2_fh_open failed\n");
		return ret;
	}

	/* leaving if layer is already initialized */
	if (!v4l2_fh_is_singular_file(file))
		return 0;

	/* FIXME: should power be enabled on open? */
	ret = mxr_power_get(mdev);
	if (ret) {
		mxr_err(mdev, "power on failed\n");
		goto fail_fh_open;
	}

	ret = vb2_queue_init(&layer->vb_queue);
	if (ret != 0) {
		mxr_err(mdev, "failed to initialize vb2 queue\n");
		goto fail_power;
	}
	/* set default format, first on the list */
	layer->fmt = layer->fmt_array[0];
	/* setup default geometry */
	mxr_layer_default_geo(layer);

	return 0;

fail_power:
	mxr_power_put(mdev);

fail_fh_open:
	v4l2_fh_release(file);

	return ret;
}

static unsigned int
mxr_video_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	return vb2_poll(&layer->vb_queue, file, wait);
}

static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	return vb2_mmap(&layer->vb_queue, vma);
}

static int mxr_video_release(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	if (v4l2_fh_is_singular_file(file)) {
		vb2_queue_release(&layer->vb_queue);
		mxr_power_put(layer->mdev);
	}
	v4l2_fh_release(file);
	return 0;
}

static const struct v4l2_file_operations mxr_fops = {
	.owner = THIS_MODULE,
	.open = mxr_video_open,
	.poll = mxr_video_poll,
	.mmap = mxr_video_mmap,
	.release = mxr_video_release,
	.unlocked_ioctl = video_ioctl2,
};

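/*
 * vb2 queue_setup callback: reports the number of planes and the
 * page-aligned size of each plane for the currently selected format,
 * and hands out the DMA-contig allocator context for every plane.
 */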
static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
	unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[],
	void *alloc_ctxs[])
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	const struct mxr_format *fmt = layer->fmt;
	int i;
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_plane_pix_format planes[3];

	mxr_dbg(mdev, "%s\n", __func__);
	/* checking if format was configured */
	if (fmt == NULL)
		return -EINVAL;
	mxr_dbg(mdev, "fmt = %s\n", fmt->name);
	mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
		layer->geo.src.full_height);

	*nplanes = fmt->num_subframes;
	for (i = 0; i < fmt->num_subframes; ++i) {
		alloc_ctxs[i] = layer->mdev->alloc_ctx;
		sizes[i] = PAGE_ALIGN(planes[i].sizeimage);
		mxr_dbg(mdev, "size[%d] = %08x\n", i, sizes[i]);
	}

	if (*nbuffers == 0)
		*nbuffers = 1;

	return 0;
}

static void buf_queue(struct vb2_buffer *vb)
{
	struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb);
	struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	spin_lock_irqsave(&layer->enq_slock, flags);
	list_add_tail(&buffer->list, &layer->enq_list);
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	mxr_dbg(mdev, "queuing buffer\n");
}

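/*
 * vb2 wait_finish/wait_prepare callbacks: the layer mutex is released
 * while vb2 sleeps waiting for buffers and re-taken afterwards.
 */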
static void wait_lock(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);

	mxr_dbg(layer->mdev, "%s\n", __func__);
	mutex_lock(&layer->mutex);
}

static void wait_unlock(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);

	mxr_dbg(layer->mdev, "%s\n", __func__);
	mutex_unlock(&layer->mutex);
}

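/*
 * vb2 start_streaming callback: pins the current output configuration,
 * programs the layer format, marks the layer as streaming and enables
 * it in hardware.
 */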
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_dbg(mdev, "%s\n", __func__);

	if (count == 0) {
		mxr_dbg(mdev, "no output buffers queued\n");
		return -EINVAL;
	}

	/* block any changes in output configuration */
	mxr_output_get(mdev);

	mxr_layer_update_output(layer);
	layer->ops.format_set(layer);
	/* enabling layer in hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_STREAMING;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	layer->ops.stream_set(layer, MXR_ENABLE);
	mxr_streamer_get(mdev);

	return 0;
}

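/*
 * Watchdog used during stop_streaming(): if the hardware fails to
 * return the last buffers in time, they are forcibly completed with
 * the ERROR state so that vb2_wait_for_all_buffers() can finish.
 */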
static void mxr_watchdog(unsigned long arg)
{
	struct mxr_layer *layer = (struct mxr_layer *) arg;
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);

	spin_lock_irqsave(&layer->enq_slock, flags);

	if (layer->update_buf == layer->shadow_buf)
		layer->update_buf = NULL;
	if (layer->update_buf) {
		vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR);
		layer->update_buf = NULL;
	}
	if (layer->shadow_buf) {
		vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR);
		layer->shadow_buf = NULL;
	}
	spin_unlock_irqrestore(&layer->enq_slock, flags);
}

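/*
 * vb2 stop_streaming callback: returns all queued buffers with the
 * ERROR state, waits (guarded by a 1 second watchdog) for the buffers
 * still owned by hardware, then disables the layer and drops the
 * streamer and output references.
 */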
static int stop_streaming(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;
	struct timer_list watchdog;
	struct mxr_buffer *buf, *buf_tmp;

	mxr_dbg(mdev, "%s\n", __func__);

	spin_lock_irqsave(&layer->enq_slock, flags);

	/* reset list */
	layer->state = MXR_LAYER_STREAMING_FINISH;

	/* set all buffers to be done */
	list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* give 1 second to complete the last buffers */
	setup_timer_on_stack(&watchdog, mxr_watchdog,
		(unsigned long)layer);
	mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));

	/* wait until all buffers have reached the done state */
	vb2_wait_for_all_buffers(vq);

	/* stop the timer once all synchronization is done */
	del_timer_sync(&watchdog);
	destroy_timer_on_stack(&watchdog);

	/* stopping hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_IDLE;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* disabling layer in hardware */
	layer->ops.stream_set(layer, MXR_DISABLE);
	/* remove one streamer */
	mxr_streamer_put(mdev);
	/* allow changes in output configuration */
	mxr_output_put(mdev);
	return 0;
}

static struct vb2_ops mxr_video_qops = {
	.queue_setup = queue_setup,
	.buf_queue = buf_queue,
	.wait_prepare = wait_unlock,
	.wait_finish = wait_lock,
	.start_streaming = start_streaming,
	.stop_streaming = stop_streaming,
};

/* FIXME: try to put this function into mxr_base_layer_create */
int mxr_base_layer_register(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	int ret;

	ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
	if (ret)
		mxr_err(mdev, "failed to register video device\n");
	else
		mxr_info(mdev, "registered layer %s as /dev/video%d\n",
			layer->vfd.name, layer->vfd.num);
	return ret;
}

void mxr_base_layer_unregister(struct mxr_layer *layer)
{
	video_unregister_device(&layer->vfd);
}

void mxr_layer_release(struct mxr_layer *layer)
{
	if (layer->ops.release)
		layer->ops.release(layer);
}

void mxr_base_layer_release(struct mxr_layer *layer)
{
	kfree(layer);
}

static void mxr_vfd_release(struct video_device *vdev)
{
	printk(KERN_INFO "video device release\n");
}

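/*
 * Allocates and initializes a generic mixer layer: copies the layer ops,
 * sets up locking, the video_device template and the vb2 output queue.
 * The caller is expected to register the layer separately.
 */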
struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
	int idx, char *name, struct mxr_layer_ops *ops)
{
	struct mxr_layer *layer;

	layer = kzalloc(sizeof *layer, GFP_KERNEL);
	if (layer == NULL) {
		mxr_err(mdev, "not enough memory for layer.\n");
		goto fail;
	}

	layer->mdev = mdev;
	layer->idx = idx;
	layer->ops = *ops;

	spin_lock_init(&layer->enq_slock);
	INIT_LIST_HEAD(&layer->enq_list);
	mutex_init(&layer->mutex);

	layer->vfd = (struct video_device) {
		.minor = -1,
		.release = mxr_vfd_release,
		.fops = &mxr_fops,
		.ioctl_ops = &mxr_ioctl_ops,
	};
	strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));
	/* let framework control PRIORITY */
	set_bit(V4L2_FL_USE_FH_PRIO, &layer->vfd.flags);

	video_set_drvdata(&layer->vfd, layer);
	layer->vfd.lock = &layer->mutex;
	layer->vfd.v4l2_dev = &mdev->v4l2_dev;

	layer->vb_queue = (struct vb2_queue) {
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
		.io_modes = VB2_MMAP | VB2_USERPTR,
		.drv_priv = layer,
		.buf_struct_size = sizeof(struct mxr_buffer),
		.ops = &mxr_video_qops,
		.mem_ops = &vb2_dma_contig_memops,
	};

	return layer;

fail:
	return NULL;
}

static const struct mxr_format *find_format_by_fourcc(
	struct mxr_layer *layer, unsigned long fourcc)
{
	int i;

	for (i = 0; i < layer->fmt_array_size; ++i)
		if (layer->fmt_array[i]->fourcc == fourcc)
			return layer->fmt_array[i];
	return NULL;
}

static const struct mxr_format *find_format_by_index(
	struct mxr_layer *layer, unsigned long index)
{
	if (index >= layer->fmt_array_size)
		return NULL;
	return layer->fmt_array[index];
}