1 /*
2 * ispvideo.c
3 *
4 * TI OMAP3 ISP - Generic video node
5 *
6 * Copyright (C) 2009-2010 Nokia Corporation
7 *
8 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
9 * Sakari Ailus <sakari.ailus@iki.fi>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26 #include <asm/cacheflush.h>
27 #include <linux/clk.h>
28 #include <linux/mm.h>
29 #include <linux/module.h>
30 #include <linux/pagemap.h>
31 #include <linux/scatterlist.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <linux/vmalloc.h>
35 #include <media/v4l2-dev.h>
36 #include <media/v4l2-ioctl.h>
37 #include <plat/iommu.h>
38 #include <plat/iovmm.h>
39 #include <plat/omap-pm.h>
40
41 #include "ispvideo.h"
42 #include "isp.h"
43
44
45 /* -----------------------------------------------------------------------------
46 * Helper functions
47 */
48
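/*
 * Table of supported media bus formats. Each entry maps a media bus code to
 * related bus codes, to the matching V4L2 pixel format and to the number of
 * bits per pixel (see struct isp_format_info in ispvideo.h for the field
 * layout).
 */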
49 static struct isp_format_info formats[] = {
50 { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
51 V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
52 V4L2_PIX_FMT_GREY, 8, },
53 { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
54 V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
55 V4L2_PIX_FMT_Y10, 10, },
56 { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
57 V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
58 V4L2_PIX_FMT_Y12, 12, },
59 { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
60 V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
61 V4L2_PIX_FMT_SBGGR8, 8, },
62 { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
63 V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
64 V4L2_PIX_FMT_SGBRG8, 8, },
65 { V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
66 V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
67 V4L2_PIX_FMT_SGRBG8, 8, },
68 { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
69 V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
70 V4L2_PIX_FMT_SRGGB8, 8, },
71 { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
72 V4L2_MBUS_FMT_SGRBG10_1X10, 0,
73 V4L2_PIX_FMT_SGRBG10DPCM8, 8, },
74 { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
75 V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
76 V4L2_PIX_FMT_SBGGR10, 10, },
77 { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
78 V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
79 V4L2_PIX_FMT_SGBRG10, 10, },
80 { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
81 V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
82 V4L2_PIX_FMT_SGRBG10, 10, },
83 { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
84 V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
85 V4L2_PIX_FMT_SRGGB10, 10, },
86 { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
87 V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
88 V4L2_PIX_FMT_SBGGR12, 12, },
89 { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
90 V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
91 V4L2_PIX_FMT_SGBRG12, 12, },
92 { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
93 V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
94 V4L2_PIX_FMT_SGRBG12, 12, },
95 { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
96 V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
97 V4L2_PIX_FMT_SRGGB12, 12, },
98 { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
99 V4L2_MBUS_FMT_UYVY8_1X16, 0,
100 V4L2_PIX_FMT_UYVY, 16, },
101 { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
102 V4L2_MBUS_FMT_YUYV8_1X16, 0,
103 V4L2_PIX_FMT_YUYV, 16, },
104 };
105
106 const struct isp_format_info *
107 omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
108 {
109 unsigned int i;
110
111 for (i = 0; i < ARRAY_SIZE(formats); ++i) {
112 if (formats[i].code == code)
113 return &formats[i];
114 }
115
116 return NULL;
117 }
118
119 /*
120 * Decide whether the desired output pixel code can be obtained with
121 * the lane shifter by shifting the input pixel code.
122 * @in: input pixelcode to shifter
123 * @out: output pixelcode from shifter
124 * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0]
125 *
126 * return true if the combination is possible
127 * return false otherwise
128 */
129 static bool isp_video_is_shiftable(enum v4l2_mbus_pixelcode in,
130 enum v4l2_mbus_pixelcode out,
131 unsigned int additional_shift)
132 {
133 const struct isp_format_info *in_info, *out_info;
134
135 if (in == out)
136 return true;
137
138 in_info = omap3isp_video_format_info(in);
139 out_info = omap3isp_video_format_info(out);
140
141 if ((in_info->flavor == 0) || (out_info->flavor == 0))
142 return false;
143
144 if (in_info->flavor != out_info->flavor)
145 return false;
146
147 return in_info->bpp - out_info->bpp + additional_shift <= 6;
148 }
149
150 /*
151 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
152 * @video: ISP video instance
153 * @mbus: v4l2_mbus_framefmt format (input)
154 * @pix: v4l2_pix_format format (output)
155 *
156 * Fill the output pix structure with information from the input mbus format.
157 * The bytesperline and sizeimage fields are computed from the requested bytes
158 * per line value in the pix format and information from the video instance.
159 *
160 * Return the number of padding bytes at end of line.
161 */
162 static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
163 const struct v4l2_mbus_framefmt *mbus,
164 struct v4l2_pix_format *pix)
165 {
166 unsigned int bpl = pix->bytesperline;
167 unsigned int min_bpl;
168 unsigned int i;
169
170 memset(pix, 0, sizeof(*pix));
171 pix->width = mbus->width;
172 pix->height = mbus->height;
173
174 for (i = 0; i < ARRAY_SIZE(formats); ++i) {
175 if (formats[i].code == mbus->code)
176 break;
177 }
178
179 if (WARN_ON(i == ARRAY_SIZE(formats)))
180 return 0;
181
182 min_bpl = pix->width * ALIGN(formats[i].bpp, 8) / 8;
183
184 /* Clamp the requested bytes per line value. If the maximum bytes per
185 * line value is zero, the module doesn't support user configurable line
186 * sizes. Override the requested value with the minimum in that case.
187 */
188 if (video->bpl_max)
189 bpl = clamp(bpl, min_bpl, video->bpl_max);
190 else
191 bpl = min_bpl;
192
193 if (!video->bpl_zero_padding || bpl != min_bpl)
194 bpl = ALIGN(bpl, video->bpl_alignment);
195
196 pix->pixelformat = formats[i].pixelformat;
197 pix->bytesperline = bpl;
198 pix->sizeimage = pix->bytesperline * pix->height;
199 pix->colorspace = mbus->colorspace;
200 pix->field = mbus->field;
201
202 return bpl - min_bpl;
203 }
204
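/*
 * isp_video_pix_to_mbus - Convert v4l2_pix_format to v4l2_mbus_framefmt
 * @pix: v4l2_pix_format format (input)
 * @mbus: v4l2_mbus_framefmt format (output)
 *
 * Fill the output mbus structure with the media bus code matching the pixel
 * format set in the pix structure. If no match is found the last entry in the
 * formats table is used.
 */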
205 static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
206 struct v4l2_mbus_framefmt *mbus)
207 {
208 unsigned int i;
209
210 memset(mbus, 0, sizeof(*mbus));
211 mbus->width = pix->width;
212 mbus->height = pix->height;
213
214 /* Skip the last format in the loop so that it will be selected if no
215 * match is found.
216 */
217 for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
218 if (formats[i].pixelformat == pix->pixelformat)
219 break;
220 }
221
222 mbus->code = formats[i].code;
223 mbus->colorspace = pix->colorspace;
224 mbus->field = pix->field;
225 }
226
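/*
 * isp_video_remote_subdev - Return the subdev at the remote end of the link
 * @video: ISP video instance
 * @pad: if not NULL, updated with the index of the remote subdev pad
 *
 * Return the V4L2 subdev connected to the video node pad, or NULL if the
 * remote entity isn't a subdev.
 */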
227 static struct v4l2_subdev *
228 isp_video_remote_subdev(struct isp_video *video, u32 *pad)
229 {
230 struct media_pad *remote;
231
232 remote = media_entity_remote_source(&video->pad);
233
234 if (remote == NULL ||
235 media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
236 return NULL;
237
238 if (pad)
239 *pad = remote->index;
240
241 return media_entity_to_v4l2_subdev(remote->entity);
242 }
243
244 /* Return a pointer to the ISP video instance at the far end of the pipeline. */
245 static struct isp_video *
246 isp_video_far_end(struct isp_video *video)
247 {
248 struct media_entity_graph graph;
249 struct media_entity *entity = &video->video.entity;
250 struct media_device *mdev = entity->parent;
251 struct isp_video *far_end = NULL;
252
253 mutex_lock(&mdev->graph_mutex);
254 media_entity_graph_walk_start(&graph, entity);
255
256 while ((entity = media_entity_graph_walk_next(&graph))) {
257 if (entity == &video->video.entity)
258 continue;
259
260 if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
261 continue;
262
263 far_end = to_isp_video(media_entity_to_video_device(entity));
264 if (far_end->type != video->type)
265 break;
266
267 far_end = NULL;
268 }
269
270 mutex_unlock(&mdev->graph_mutex);
271 return far_end;
272 }
273
274 /*
275 * Validate a pipeline by checking both ends of all links for format
276 * discrepancies.
277 *
278 * Compute the minimum time per frame value as the maximum of time per frame
279 * limits reported by every block in the pipeline.
280 *
281 * Return 0 if all formats match, or -EPIPE if at least one link is found with
282 * different formats on its two ends or if the pipeline doesn't start with a
283 * video source (either a subdev with no input pad, or a non-subdev entity).
284 */
285 static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
286 {
287 struct isp_device *isp = pipe->output->isp;
288 struct v4l2_subdev_format fmt_source;
289 struct v4l2_subdev_format fmt_sink;
290 struct media_pad *pad;
291 struct v4l2_subdev *subdev;
292 int ret;
293
294 pipe->max_rate = pipe->l3_ick;
295
296 subdev = isp_video_remote_subdev(pipe->output, NULL);
297 if (subdev == NULL)
298 return -EPIPE;
299
300 while (1) {
301 unsigned int shifter_link;
302 /* Retrieve the sink format */
303 pad = &subdev->entity.pads[0];
304 if (!(pad->flags & MEDIA_PAD_FL_SINK))
305 break;
306
307 fmt_sink.pad = pad->index;
308 fmt_sink.which = V4L2_SUBDEV_FORMAT_ACTIVE;
309 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_sink);
310 if (ret < 0 && ret != -ENOIOCTLCMD)
311 return -EPIPE;
312
313 /* Update the maximum frame rate */
314 if (subdev == &isp->isp_res.subdev)
315 omap3isp_resizer_max_rate(&isp->isp_res,
316 &pipe->max_rate);
317
318 /* Check the CCDC maximum data rate when data comes from the sensor.
319 * TODO: Include the CCDC rate in pipe->max_rate and compare the
320 * total pipe rate with the input data rate from the sensor.
321 */
322 if (subdev == &isp->isp_ccdc.subdev && pipe->input == NULL) {
323 unsigned int rate = UINT_MAX;
324
325 omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
326 if (isp->isp_ccdc.vpcfg.pixelclk > rate)
327 return -ENOSPC;
328 }
329
330 /* If the sink pad is on the CCDC, the link has the lane shifter
331 * in the middle of it. */
332 shifter_link = subdev == &isp->isp_ccdc.subdev;
333
334 /* Retrieve the source format. Return an error if no source
335 * entity can be found, and stop checking the pipeline if the
336 * source entity isn't a subdev.
337 */
338 pad = media_entity_remote_source(pad);
339 if (pad == NULL)
340 return -EPIPE;
341
342 if (media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
343 break;
344
345 subdev = media_entity_to_v4l2_subdev(pad->entity);
346
347 fmt_source.pad = pad->index;
348 fmt_source.which = V4L2_SUBDEV_FORMAT_ACTIVE;
349 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_source);
350 if (ret < 0 && ret != -ENOIOCTLCMD)
351 return -EPIPE;
352
353 /* Check if the two ends match */
354 if (fmt_source.format.width != fmt_sink.format.width ||
355 fmt_source.format.height != fmt_sink.format.height)
356 return -EPIPE;
357
358 if (shifter_link) {
359 unsigned int parallel_shift = 0;
360 if (isp->isp_ccdc.input == CCDC_INPUT_PARALLEL) {
361 struct isp_parallel_platform_data *pdata =
362 &((struct isp_v4l2_subdevs_group *)
363 subdev->host_priv)->bus.parallel;
364 parallel_shift = pdata->data_lane_shift * 2;
365 }
366 if (!isp_video_is_shiftable(fmt_source.format.code,
367 fmt_sink.format.code,
368 parallel_shift))
369 return -EPIPE;
370 } else if (fmt_source.format.code != fmt_sink.format.code)
371 return -EPIPE;
372 }
373
374 return 0;
375 }
376
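/*
 * __isp_video_get_format - Get the current format at the connected subdev pad
 * @video: ISP video instance
 * @format: V4L2 format structure to be filled
 *
 * Query the active format on the remote subdev pad connected to the video
 * node and convert it to a V4L2 pixel format. Return 0 on success or a
 * negative error code otherwise.
 */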
377 static int
378 __isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
379 {
380 struct v4l2_subdev_format fmt;
381 struct v4l2_subdev *subdev;
382 u32 pad;
383 int ret;
384
385 subdev = isp_video_remote_subdev(video, &pad);
386 if (subdev == NULL)
387 return -EINVAL;
388
389 mutex_lock(&video->mutex);
390
391 fmt.pad = pad;
392 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
393 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
394 if (ret == -ENOIOCTLCMD)
395 ret = -EINVAL;
396
397 mutex_unlock(&video->mutex);
398
399 if (ret)
400 return ret;
401
402 format->type = video->type;
403 return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
404 }
405
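/*
 * isp_video_check_format - Check that the file handle format matches the subdev
 * @video: ISP video instance
 * @vfh: ISP video file handle
 *
 * Compare the format stored in the file handle with the current format at the
 * output of the connected subdev. Return the number of padding bytes at the
 * end of the line on success, or -EINVAL if the formats don't match.
 */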
406 static int
407 isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
408 {
409 struct v4l2_format format;
410 int ret;
411
412 memcpy(&format, &vfh->format, sizeof(format));
413 ret = __isp_video_get_format(video, &format);
414 if (ret < 0)
415 return ret;
416
417 if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
418 vfh->format.fmt.pix.height != format.fmt.pix.height ||
419 vfh->format.fmt.pix.width != format.fmt.pix.width ||
420 vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
421 vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage)
422 return -EINVAL;
423
424 return ret;
425 }
426
427 /* -----------------------------------------------------------------------------
428 * IOMMU management
429 */
430
431 #define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)
432
433 /*
434 * ispmmu_vmap - Wrapper for virtual memory mapping of a scatter gather list
435 * @isp: Device pointer specific to the OMAP3 ISP.
436 * @sglist: Pointer to the source scatter gather list to map.
437 * @sglen: Number of elements in the scatter-gather list.
438 *
439 * Returns the device address mapped by the ISP MMU, or -ENOMEM if we ran
440 * out of memory.
441 */
442 static dma_addr_t
443 ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
444 {
445 struct sg_table *sgt;
446 u32 da;
447
448 sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
449 if (sgt == NULL)
450 return -ENOMEM;
451
452 sgt->sgl = (struct scatterlist *)sglist;
453 sgt->nents = sglen;
454 sgt->orig_nents = sglen;
455
456 da = omap_iommu_vmap(isp->domain, isp->dev, 0, sgt, IOMMU_FLAG);
457 if (IS_ERR_VALUE(da))
458 kfree(sgt);
459
460 return da;
461 }
462
463 /*
464 * ispmmu_vunmap - Unmap a device address from the ISP MMU
465 * @isp: Device pointer specific to the OMAP3 ISP.
466 * @da: Device address generated from an ispmmu_vmap call.
467 */
468 static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
469 {
470 struct sg_table *sgt;
471
472 sgt = omap_iommu_vunmap(isp->domain, isp->dev, (u32)da);
473 kfree(sgt);
474 }
475
476 /* -----------------------------------------------------------------------------
477 * Video queue operations
478 */
479
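/*
 * isp_video_queue_prepare - Video buffers queue prepare operation
 * @queue: ISP video buffers queue
 * @nbuffers: Number of buffers requested, updated with the accepted count
 * @size: Buffer size in bytes
 *
 * Set the buffer size to the image size stored in the file handle format and
 * limit the number of buffers so that the total amount of mapped memory
 * doesn't exceed the video node capture memory limit.
 */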
480 static void isp_video_queue_prepare(struct isp_video_queue *queue,
481 unsigned int *nbuffers, unsigned int *size)
482 {
483 struct isp_video_fh *vfh =
484 container_of(queue, struct isp_video_fh, queue);
485 struct isp_video *video = vfh->video;
486
487 *size = vfh->format.fmt.pix.sizeimage;
488 if (*size == 0)
489 return;
490
491 *nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size));
492 }
493
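/*
 * isp_video_buffer_cleanup - Release the ISP MMU mapping of a video buffer
 * @buf: Video buffer
 *
 * Unmap the buffer from the ISP device address space if it has been mapped.
 */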
494 static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
495 {
496 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
497 struct isp_buffer *buffer = to_isp_buffer(buf);
498 struct isp_video *video = vfh->video;
499
500 if (buffer->isp_addr) {
501 ispmmu_vunmap(video->isp, buffer->isp_addr);
502 buffer->isp_addr = 0;
503 }
504 }
505
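/*
 * isp_video_buffer_prepare - Map a video buffer through the ISP MMU
 * @buf: Video buffer
 *
 * Map the buffer scatter-gather list through the ISP MMU and check that the
 * resulting device address is aligned to a 32 byte boundary as required by
 * the hardware. Return 0 on success, -EIO if the mapping failed or -EINVAL if
 * the address isn't correctly aligned.
 */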
506 static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
507 {
508 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
509 struct isp_buffer *buffer = to_isp_buffer(buf);
510 struct isp_video *video = vfh->video;
511 unsigned long addr;
512
513 addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
514 if (IS_ERR_VALUE(addr))
515 return -EIO;
516
517 if (!IS_ALIGNED(addr, 32)) {
518 dev_dbg(video->isp->dev, "Buffer address must be "
519 "aligned to a 32 byte boundary.\n");
520 ispmmu_vunmap(video->isp, buffer->isp_addr);
521 return -EINVAL;
522 }
523
524 buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage;
525 buffer->isp_addr = addr;
526 return 0;
527 }
528
529 /*
530 * isp_video_buffer_queue - Add buffer to streaming queue
531 * @buf: Video buffer
532 *
533 * In memory-to-memory mode, start streaming on the pipeline if buffers are
534 * queued on both the input and the output and the pipeline isn't already busy.
535 * If the pipeline is busy, it will be restarted in the output module interrupt
536 * handler.
537 */
538 static void isp_video_buffer_queue(struct isp_video_buffer *buf)
539 {
540 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
541 struct isp_buffer *buffer = to_isp_buffer(buf);
542 struct isp_video *video = vfh->video;
543 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
544 enum isp_pipeline_state state;
545 unsigned long flags;
546 unsigned int empty;
547 unsigned int start;
548
549 empty = list_empty(&video->dmaqueue);
550 list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);
551
552 if (empty) {
553 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
554 state = ISP_PIPELINE_QUEUE_OUTPUT;
555 else
556 state = ISP_PIPELINE_QUEUE_INPUT;
557
558 spin_lock_irqsave(&pipe->lock, flags);
559 pipe->state |= state;
560 video->ops->queue(video, buffer);
561 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
562
563 start = isp_pipeline_ready(pipe);
564 if (start)
565 pipe->state |= ISP_PIPELINE_STREAM;
566 spin_unlock_irqrestore(&pipe->lock, flags);
567
568 if (start)
569 omap3isp_pipeline_set_stream(pipe,
570 ISP_PIPELINE_STREAM_SINGLESHOT);
571 }
572 }
573
574 static const struct isp_video_queue_operations isp_video_queue_ops = {
575 .queue_prepare = &isp_video_queue_prepare,
576 .buffer_prepare = &isp_video_buffer_prepare,
577 .buffer_queue = &isp_video_buffer_queue,
578 .buffer_cleanup = &isp_video_buffer_cleanup,
579 };
580
581 /*
582 * omap3isp_video_buffer_next - Complete the current buffer and return the next
583 * @video: ISP video object
584 *
585 * Remove the current video buffer from the DMA queue and fill its timestamp,
586 * field count and state fields before waking up its completion handler.
587 *
588 * For capture video nodes the buffer state is set to ISP_BUF_STATE_DONE if no
589 * error has been flagged in the pipeline, or to ISP_BUF_STATE_ERROR otherwise.
590 * For video output nodes the buffer state is always set to ISP_BUF_STATE_DONE.
591 *
592 * The DMA queue is expected to contain at least one buffer.
593 *
594 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
595 * empty.
596 */
597 struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
598 {
599 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
600 struct isp_video_queue *queue = video->queue;
601 enum isp_pipeline_state state;
602 struct isp_video_buffer *buf;
603 unsigned long flags;
604 struct timespec ts;
605
606 spin_lock_irqsave(&queue->irqlock, flags);
607 if (WARN_ON(list_empty(&video->dmaqueue))) {
608 spin_unlock_irqrestore(&queue->irqlock, flags);
609 return NULL;
610 }
611
612 buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
613 irqlist);
614 list_del(&buf->irqlist);
615 spin_unlock_irqrestore(&queue->irqlock, flags);
616
617 ktime_get_ts(&ts);
618 buf->vbuf.timestamp.tv_sec = ts.tv_sec;
619 buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
620
621 /* Do frame number propagation only if this is the output video node.
622 * Frame number either comes from the CSI receivers or it gets
623 * incremented here if H3A is not active.
624 * Note: There is no guarantee that the output buffer will finish
625 * first, so the input number might lag behind by 1 in some cases.
626 */
627 if (video == pipe->output && !pipe->do_propagation)
628 buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number);
629 else
630 buf->vbuf.sequence = atomic_read(&pipe->frame_number);
631
632 /* Report pipeline errors to userspace on the capture device side. */
633 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
634 buf->state = ISP_BUF_STATE_ERROR;
635 pipe->error = false;
636 } else {
637 buf->state = ISP_BUF_STATE_DONE;
638 }
639
640 wake_up(&buf->wait);
641
642 if (list_empty(&video->dmaqueue)) {
643 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
644 state = ISP_PIPELINE_QUEUE_OUTPUT
645 | ISP_PIPELINE_STREAM;
646 else
647 state = ISP_PIPELINE_QUEUE_INPUT
648 | ISP_PIPELINE_STREAM;
649
650 spin_lock_irqsave(&pipe->lock, flags);
651 pipe->state &= ~state;
652 if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
653 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
654 spin_unlock_irqrestore(&pipe->lock, flags);
655 return NULL;
656 }
657
658 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
659 spin_lock_irqsave(&pipe->lock, flags);
660 pipe->state &= ~ISP_PIPELINE_STREAM;
661 spin_unlock_irqrestore(&pipe->lock, flags);
662 }
663
664 buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
665 irqlist);
666 buf->state = ISP_BUF_STATE_ACTIVE;
667 return to_isp_buffer(buf);
668 }
669
670 /*
671 * omap3isp_video_resume - Perform resume operation on the buffers
672 * @video: ISP video object
673 * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
674 *
675 * This function is intended to be used in suspend/resume scenarios. It
676 * requests the video queue layer to discard buffers marked as DONE if it's in
677 * continuous mode and requests the ISP modules to requeue the ACTIVE buffer
678 * if there's any.
679 */
680 void omap3isp_video_resume(struct isp_video *video, int continuous)
681 {
682 struct isp_buffer *buf = NULL;
683
684 if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
685 omap3isp_video_queue_discard_done(video->queue);
686
687 if (!list_empty(&video->dmaqueue)) {
688 buf = list_first_entry(&video->dmaqueue,
689 struct isp_buffer, buffer.irqlist);
690 video->ops->queue(video, buf);
691 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
692 } else {
693 if (continuous)
694 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
695 }
696 }
697
698 /* -----------------------------------------------------------------------------
699 * V4L2 ioctls
700 */
701
702 static int
703 isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
704 {
705 struct isp_video *video = video_drvdata(file);
706
707 strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
708 strlcpy(cap->card, video->video.name, sizeof(cap->card));
709 strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
710
711 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
712 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
713 else
714 cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
715
716 return 0;
717 }
718
719 static int
720 isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
721 {
722 struct isp_video_fh *vfh = to_isp_video_fh(fh);
723 struct isp_video *video = video_drvdata(file);
724
725 if (format->type != video->type)
726 return -EINVAL;
727
728 mutex_lock(&video->mutex);
729 *format = vfh->format;
730 mutex_unlock(&video->mutex);
731
732 return 0;
733 }
734
735 static int
736 isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
737 {
738 struct isp_video_fh *vfh = to_isp_video_fh(fh);
739 struct isp_video *video = video_drvdata(file);
740 struct v4l2_mbus_framefmt fmt;
741
742 if (format->type != video->type)
743 return -EINVAL;
744
745 mutex_lock(&video->mutex);
746
747 /* Fill the bytesperline and sizeimage fields by converting to media bus
748 * format and back to pixel format.
749 */
750 isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
751 isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
752
753 vfh->format = *format;
754
755 mutex_unlock(&video->mutex);
756 return 0;
757 }
758
759 static int
760 isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
761 {
762 struct isp_video *video = video_drvdata(file);
763 struct v4l2_subdev_format fmt;
764 struct v4l2_subdev *subdev;
765 u32 pad;
766 int ret;
767
768 if (format->type != video->type)
769 return -EINVAL;
770
771 subdev = isp_video_remote_subdev(video, &pad);
772 if (subdev == NULL)
773 return -EINVAL;
774
775 isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);
776
777 fmt.pad = pad;
778 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
779 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
780 if (ret)
781 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
782
783 isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
784 return 0;
785 }
786
787 static int
788 isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
789 {
790 struct isp_video *video = video_drvdata(file);
791 struct v4l2_subdev *subdev;
792 int ret;
793
794 subdev = isp_video_remote_subdev(video, NULL);
795 if (subdev == NULL)
796 return -EINVAL;
797
798 mutex_lock(&video->mutex);
799 ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
800 mutex_unlock(&video->mutex);
801
802 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
803 }
804
805 static int
806 isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
807 {
808 struct isp_video *video = video_drvdata(file);
809 struct v4l2_subdev_format format;
810 struct v4l2_subdev *subdev;
811 u32 pad;
812 int ret;
813
814 subdev = isp_video_remote_subdev(video, &pad);
815 if (subdev == NULL)
816 return -EINVAL;
817
818 /* Try the get crop operation first and fallback to get format if not
819 * implemented.
820 */
821 ret = v4l2_subdev_call(subdev, video, g_crop, crop);
822 if (ret != -ENOIOCTLCMD)
823 return ret;
824
825 format.pad = pad;
826 format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
827 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
828 if (ret < 0)
829 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
830
831 crop->c.left = 0;
832 crop->c.top = 0;
833 crop->c.width = format.format.width;
834 crop->c.height = format.format.height;
835
836 return 0;
837 }
838
839 static int
840 isp_video_set_crop(struct file *file, void *fh, struct v4l2_crop *crop)
841 {
842 struct isp_video *video = video_drvdata(file);
843 struct v4l2_subdev *subdev;
844 int ret;
845
846 subdev = isp_video_remote_subdev(video, NULL);
847 if (subdev == NULL)
848 return -EINVAL;
849
850 mutex_lock(&video->mutex);
851 ret = v4l2_subdev_call(subdev, video, s_crop, crop);
852 mutex_unlock(&video->mutex);
853
854 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
855 }
856
857 static int
858 isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
859 {
860 struct isp_video_fh *vfh = to_isp_video_fh(fh);
861 struct isp_video *video = video_drvdata(file);
862
863 if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
864 video->type != a->type)
865 return -EINVAL;
866
867 memset(a, 0, sizeof(*a));
868 a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
869 a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
870 a->parm.output.timeperframe = vfh->timeperframe;
871
872 return 0;
873 }
874
875 static int
876 isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
877 {
878 struct isp_video_fh *vfh = to_isp_video_fh(fh);
879 struct isp_video *video = video_drvdata(file);
880
881 if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
882 video->type != a->type)
883 return -EINVAL;
884
885 if (a->parm.output.timeperframe.denominator == 0)
886 a->parm.output.timeperframe.denominator = 1;
887
888 vfh->timeperframe = a->parm.output.timeperframe;
889
890 return 0;
891 }
892
893 static int
894 isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
895 {
896 struct isp_video_fh *vfh = to_isp_video_fh(fh);
897
898 return omap3isp_video_queue_reqbufs(&vfh->queue, rb);
899 }
900
901 static int
902 isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
903 {
904 struct isp_video_fh *vfh = to_isp_video_fh(fh);
905
906 return omap3isp_video_queue_querybuf(&vfh->queue, b);
907 }
908
909 static int
910 isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
911 {
912 struct isp_video_fh *vfh = to_isp_video_fh(fh);
913
914 return omap3isp_video_queue_qbuf(&vfh->queue, b);
915 }
916
917 static int
918 isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
919 {
920 struct isp_video_fh *vfh = to_isp_video_fh(fh);
921
922 return omap3isp_video_queue_dqbuf(&vfh->queue, b,
923 file->f_flags & O_NONBLOCK);
924 }
925
926 /*
927 * Stream management
928 *
929 * Every ISP pipeline has a single input and a single output. The input can be
930 * either a sensor or a video node. The output is always a video node.
931 *
932 * As every pipeline has an output video node, the ISP video object at the
933 * pipeline output stores the pipeline state. It tracks the streaming state of
934 * both the input and output, as well as the availability of buffers.
935 *
936 * In sensor-to-memory mode, frames are always available at the pipeline input.
937 * Starting the sensor usually requires I2C transfers and must be done in
938 * interruptible context. The pipeline is started and stopped synchronously
939 * to the stream on/off commands. All modules in the pipeline will get their
940 * subdev set stream handler called. The module at the end of the pipeline must
941 * delay starting the hardware until buffers are available at its output.
942 *
943 * In memory-to-memory mode, starting/stopping the stream requires
944 * synchronization between the input and output. ISP modules can't be stopped
945 * in the middle of a frame, and at least some of the modules seem to become
946 * busy as soon as they're started, even if they don't receive a frame start
947 * event. For that reason frames need to be processed in single-shot mode. The
948 * driver needs to wait until a frame is completely processed and written to
949 * memory before restarting the pipeline for the next frame. Pipelined
950 * processing might be possible but requires more testing.
951 *
952 * Stream start must be delayed until buffers are available at both the input
953 * and output. The pipeline must be started in the videobuf queue callback with
954 * the buffers queue spinlock held. The modules' subdev set stream operation must
955 * not sleep.
956 */
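/*
 * isp_video_streamon - Start streaming on a video node
 *
 * Validate the pipeline connected to the video node, update the pipeline
 * state and start the video buffers queue. In sensor-to-memory mode the
 * pipeline is started immediately, in memory-to-memory mode it will be
 * started when buffers are queued on both ends (see the comment above).
 */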
957 static int
958 isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
959 {
960 struct isp_video_fh *vfh = to_isp_video_fh(fh);
961 struct isp_video *video = video_drvdata(file);
962 enum isp_pipeline_state state;
963 struct isp_pipeline *pipe;
964 struct isp_video *far_end;
965 unsigned long flags;
966 int ret;
967
968 if (type != video->type)
969 return -EINVAL;
970
971 mutex_lock(&video->stream_lock);
972
973 if (video->streaming) {
974 mutex_unlock(&video->stream_lock);
975 return -EBUSY;
976 }
977
978 /* Start streaming on the pipeline. No link touching an entity in the
979 * pipeline can be activated or deactivated once streaming is started.
980 */
981 pipe = video->video.entity.pipe
982 ? to_isp_pipeline(&video->video.entity) : &video->pipe;
983 media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
984
985 /* Verify that the currently configured format matches the output of
986 * the connected subdev.
987 */
988 ret = isp_video_check_format(video, vfh);
989 if (ret < 0)
990 goto error;
991
992 video->bpl_padding = ret;
993 video->bpl_value = vfh->format.fmt.pix.bytesperline;
994
995 /* Find the ISP video node connected at the far end of the pipeline and
996 * update the pipeline.
997 */
998 far_end = isp_video_far_end(video);
999
1000 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
1001 state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
1002 pipe->input = far_end;
1003 pipe->output = video;
1004 } else {
1005 if (far_end == NULL) {
1006 ret = -EPIPE;
1007 goto error;
1008 }
1009
1010 state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;
1011 pipe->input = video;
1012 pipe->output = far_end;
1013 }
1014
1015 if (video->isp->pdata->set_constraints)
1016 video->isp->pdata->set_constraints(video->isp, true);
1017 pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
1018
1019 /* Validate the pipeline and update its state. */
1020 ret = isp_video_validate_pipeline(pipe);
1021 if (ret < 0)
1022 goto error;
1023
1024 pipe->error = false;
1025
1026 spin_lock_irqsave(&pipe->lock, flags);
1027 pipe->state &= ~ISP_PIPELINE_STREAM;
1028 pipe->state |= state;
1029 spin_unlock_irqrestore(&pipe->lock, flags);
1030
1031 /* Set the maximum time per frame as the value requested by userspace.
1032 * This is a soft limit that can be overridden if the hardware doesn't
1033 * support the requested limit.
1034 */
1035 if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1036 pipe->max_timeperframe = vfh->timeperframe;
1037
1038 video->queue = &vfh->queue;
1039 INIT_LIST_HEAD(&video->dmaqueue);
1040 atomic_set(&pipe->frame_number, -1);
1041
1042 ret = omap3isp_video_queue_streamon(&vfh->queue);
1043 if (ret < 0)
1044 goto error;
1045
1046 /* In sensor-to-memory mode, the stream can be started synchronously
1047 * to the stream on command. In memory-to-memory mode, it will be
1048 * started when buffers are queued on both the input and output.
1049 */
1050 if (pipe->input == NULL) {
1051 ret = omap3isp_pipeline_set_stream(pipe,
1052 ISP_PIPELINE_STREAM_CONTINUOUS);
1053 if (ret < 0)
1054 goto error;
1055 spin_lock_irqsave(&video->queue->irqlock, flags);
1056 if (list_empty(&video->dmaqueue))
1057 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
1058 spin_unlock_irqrestore(&video->queue->irqlock, flags);
1059 }
1060
1061 error:
1062 if (ret < 0) {
1063 omap3isp_video_queue_streamoff(&vfh->queue);
1064 if (video->isp->pdata->set_constraints)
1065 video->isp->pdata->set_constraints(video->isp, false);
1066 media_entity_pipeline_stop(&video->video.entity);
1067 /* The DMA queue must be emptied here, otherwise CCDC interrupts
1068 * that will get triggered the next time the CCDC is powered up
1069 * will try to access buffers that might have been freed but
1070 * still present in the DMA queue. This can easily get triggered
1071 * if the above omap3isp_pipeline_set_stream() call fails on a
1072 * system with a free-running sensor.
1073 */
1074 INIT_LIST_HEAD(&video->dmaqueue);
1075 video->queue = NULL;
1076 }
1077
1078 if (!ret)
1079 video->streaming = 1;
1080
1081 mutex_unlock(&video->stream_lock);
1082 return ret;
1083 }
1084
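/*
 * isp_video_streamoff - Stop streaming on a video node
 *
 * Stop the pipeline, clear the pipeline state bits owned by this video node
 * and release the video buffers queue resources.
 */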
1085 static int
1086 isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
1087 {
1088 struct isp_video_fh *vfh = to_isp_video_fh(fh);
1089 struct isp_video *video = video_drvdata(file);
1090 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
1091 enum isp_pipeline_state state;
1092 unsigned int streaming;
1093 unsigned long flags;
1094
1095 if (type != video->type)
1096 return -EINVAL;
1097
1098 mutex_lock(&video->stream_lock);
1099
1100 /* Make sure we're not streaming yet. */
1101 mutex_lock(&vfh->queue.lock);
1102 streaming = vfh->queue.streaming;
1103 mutex_unlock(&vfh->queue.lock);
1104
1105 if (!streaming)
1106 goto done;
1107
1108 /* Update the pipeline state. */
1109 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1110 state = ISP_PIPELINE_STREAM_OUTPUT
1111 | ISP_PIPELINE_QUEUE_OUTPUT;
1112 else
1113 state = ISP_PIPELINE_STREAM_INPUT
1114 | ISP_PIPELINE_QUEUE_INPUT;
1115
1116 spin_lock_irqsave(&pipe->lock, flags);
1117 pipe->state &= ~state;
1118 spin_unlock_irqrestore(&pipe->lock, flags);
1119
1120 /* Stop the stream. */
1121 omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
1122 omap3isp_video_queue_streamoff(&vfh->queue);
1123 video->queue = NULL;
1124 video->streaming = 0;
1125
1126 if (video->isp->pdata->set_constraints)
1127 video->isp->pdata->set_constraints(video->isp, false);
1128 media_entity_pipeline_stop(&video->video.entity);
1129
1130 done:
1131 mutex_unlock(&video->stream_lock);
1132 return 0;
1133 }
1134
1135 static int
1136 isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1137 {
1138 if (input->index > 0)
1139 return -EINVAL;
1140
1141 strlcpy(input->name, "camera", sizeof(input->name));
1142 input->type = V4L2_INPUT_TYPE_CAMERA;
1143
1144 return 0;
1145 }
1146
1147 static int
1148 isp_video_g_input(struct file *file, void *fh, unsigned int *input)
1149 {
1150 *input = 0;
1151
1152 return 0;
1153 }
1154
1155 static int
1156 isp_video_s_input(struct file *file, void *fh, unsigned int input)
1157 {
1158 return input == 0 ? 0 : -EINVAL;
1159 }
1160
1161 static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
1162 .vidioc_querycap = isp_video_querycap,
1163 .vidioc_g_fmt_vid_cap = isp_video_get_format,
1164 .vidioc_s_fmt_vid_cap = isp_video_set_format,
1165 .vidioc_try_fmt_vid_cap = isp_video_try_format,
1166 .vidioc_g_fmt_vid_out = isp_video_get_format,
1167 .vidioc_s_fmt_vid_out = isp_video_set_format,
1168 .vidioc_try_fmt_vid_out = isp_video_try_format,
1169 .vidioc_cropcap = isp_video_cropcap,
1170 .vidioc_g_crop = isp_video_get_crop,
1171 .vidioc_s_crop = isp_video_set_crop,
1172 .vidioc_g_parm = isp_video_get_param,
1173 .vidioc_s_parm = isp_video_set_param,
1174 .vidioc_reqbufs = isp_video_reqbufs,
1175 .vidioc_querybuf = isp_video_querybuf,
1176 .vidioc_qbuf = isp_video_qbuf,
1177 .vidioc_dqbuf = isp_video_dqbuf,
1178 .vidioc_streamon = isp_video_streamon,
1179 .vidioc_streamoff = isp_video_streamoff,
1180 .vidioc_enum_input = isp_video_enum_input,
1181 .vidioc_g_input = isp_video_g_input,
1182 .vidioc_s_input = isp_video_s_input,
1183 };
1184
1185 /* -----------------------------------------------------------------------------
1186 * V4L2 file operations
1187 */
1188
1189 static int isp_video_open(struct file *file)
1190 {
1191 struct isp_video *video = video_drvdata(file);
1192 struct isp_video_fh *handle;
1193 int ret = 0;
1194
1195 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1196 if (handle == NULL)
1197 return -ENOMEM;
1198
1199 v4l2_fh_init(&handle->vfh, &video->video);
1200 v4l2_fh_add(&handle->vfh);
1201
1202 /* If this is the first user, initialise the pipeline. */
1203 if (omap3isp_get(video->isp) == NULL) {
1204 ret = -EBUSY;
1205 goto done;
1206 }
1207
1208 ret = omap3isp_pipeline_pm_use(&video->video.entity, 1);
1209 if (ret < 0) {
1210 omap3isp_put(video->isp);
1211 goto done;
1212 }
1213
1214 omap3isp_video_queue_init(&handle->queue, video->type,
1215 &isp_video_queue_ops, video->isp->dev,
1216 sizeof(struct isp_buffer));
1217
1218 memset(&handle->format, 0, sizeof(handle->format));
1219 handle->format.type = video->type;
1220 handle->timeperframe.denominator = 1;
1221
1222 handle->video = video;
1223 file->private_data = &handle->vfh;
1224
1225 done:
1226 if (ret < 0) {
1227 v4l2_fh_del(&handle->vfh);
1228 kfree(handle);
1229 }
1230
1231 return ret;
1232 }
1233
1234 static int isp_video_release(struct file *file)
1235 {
1236 struct isp_video *video = video_drvdata(file);
1237 struct v4l2_fh *vfh = file->private_data;
1238 struct isp_video_fh *handle = to_isp_video_fh(vfh);
1239
1240 /* Disable streaming and free the buffers queue resources. */
1241 isp_video_streamoff(file, vfh, video->type);
1242
1243 mutex_lock(&handle->queue.lock);
1244 omap3isp_video_queue_cleanup(&handle->queue);
1245 mutex_unlock(&handle->queue.lock);
1246
1247 omap3isp_pipeline_pm_use(&video->video.entity, 0);
1248
1249 /* Release the file handle. */
1250 v4l2_fh_del(vfh);
1251 kfree(handle);
1252 file->private_data = NULL;
1253
1254 omap3isp_put(video->isp);
1255
1256 return 0;
1257 }
1258
1259 static unsigned int isp_video_poll(struct file *file, poll_table *wait)
1260 {
1261 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1262 struct isp_video_queue *queue = &vfh->queue;
1263
1264 return omap3isp_video_queue_poll(queue, file, wait);
1265 }
1266
1267 static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
1268 {
1269 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1270
1271 return omap3isp_video_queue_mmap(&vfh->queue, vma);
1272 }
1273
1274 static struct v4l2_file_operations isp_video_fops = {
1275 .owner = THIS_MODULE,
1276 .unlocked_ioctl = video_ioctl2,
1277 .open = isp_video_open,
1278 .release = isp_video_release,
1279 .poll = isp_video_poll,
1280 .mmap = isp_video_mmap,
1281 };
1282
1283 /* -----------------------------------------------------------------------------
1284 * ISP video core
1285 */
1286
1287 static const struct isp_video_operations isp_video_dummy_ops = {
1288 };
1289
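/*
 * omap3isp_video_init - Initialize an ISP video node
 * @video: ISP video instance
 * @name: Name of the ISP entity the video node belongs to
 *
 * Initialize the media entity, locks and video device for the video node. The
 * caller must set the video->type field before calling this function. Return
 * 0 on success or a negative error code otherwise.
 */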
1290 int omap3isp_video_init(struct isp_video *video, const char *name)
1291 {
1292 const char *direction;
1293 int ret;
1294
1295 switch (video->type) {
1296 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
1297 direction = "output";
1298 video->pad.flags = MEDIA_PAD_FL_SINK;
1299 break;
1300 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
1301 direction = "input";
1302 video->pad.flags = MEDIA_PAD_FL_SOURCE;
1303 break;
1304
1305 default:
1306 return -EINVAL;
1307 }
1308
1309 ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
1310 if (ret < 0)
1311 return ret;
1312
1313 mutex_init(&video->mutex);
1314 atomic_set(&video->active, 0);
1315
1316 spin_lock_init(&video->pipe.lock);
1317 mutex_init(&video->stream_lock);
1318
1319 /* Initialize the video device. */
1320 if (video->ops == NULL)
1321 video->ops = &isp_video_dummy_ops;
1322
1323 video->video.fops = &isp_video_fops;
1324 snprintf(video->video.name, sizeof(video->video.name),
1325 "OMAP3 ISP %s %s", name, direction);
1326 video->video.vfl_type = VFL_TYPE_GRABBER;
1327 video->video.release = video_device_release_empty;
1328 video->video.ioctl_ops = &isp_video_ioctl_ops;
1329 video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;
1330
1331 video_set_drvdata(&video->video, video);
1332
1333 return 0;
1334 }
1335
1336 void omap3isp_video_cleanup(struct isp_video *video)
1337 {
1338 media_entity_cleanup(&video->video.entity);
1339 mutex_destroy(&video->stream_lock);
1340 mutex_destroy(&video->mutex);
1341 }
1342
1343 int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
1344 {
1345 int ret;
1346
1347 video->video.v4l2_dev = vdev;
1348
1349 ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
1350 if (ret < 0)
1351 printk(KERN_ERR "%s: could not register video device (%d)\n",
1352 __func__, ret);
1353
1354 return ret;
1355 }
1356
1357 void omap3isp_video_unregister(struct isp_video *video)
1358 {
1359 if (video_is_registered(&video->video))
1360 video_unregister_device(&video->video);
1361 }
1362