/*
 * Samsung TV Mixer driver
 *
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *
 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 2 of the License,
 * or (at your option) any later version
 */

#define pr_fmt(fmt) "s5p-tv (mixer): " fmt

#include "mixer.h"

#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <media/videobuf2-dma-contig.h>

static int find_reg_callback(struct device *dev, void *p)
{
	struct v4l2_subdev **sd = p;

	*sd = dev_get_drvdata(dev);
	/* non-zero value stops iteration */
	return 1;
}

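/*
 * Helper for mxr_acquire_video(): look up the output subdev provided by a
 * given platform driver.  driver_find() pins the driver, then
 * driver_for_each_device() passes each bound device to find_reg_callback(),
 * which stores the device's drvdata (expected to be the struct v4l2_subdev
 * set up by the output driver) and stops the iteration.  The subdev is then
 * registered with the mixer's v4l2_device; NULL is returned on any failure.
 */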
static struct v4l2_subdev *find_and_register_subdev(
	struct mxr_device *mdev, char *module_name)
{
	struct device_driver *drv;
	struct v4l2_subdev *sd = NULL;
	int ret;

	/* TODO: add waiting until probe is finished */
	drv = driver_find(module_name, &platform_bus_type);
	if (!drv) {
		mxr_warn(mdev, "module %s is missing\n", module_name);
		return NULL;
	}
	/* driver refcnt is increased, it is safe to iterate over devices */
	ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
	/* ret == 0 means that find_reg_callback was never executed */
	if (sd == NULL) {
		mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
		goto done;
	}
	/* v4l2_device_register_subdev detects if sd is NULL */
	ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
	if (ret) {
		mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
		sd = NULL;
	}

done:
	return sd;
}

int __devinit mxr_acquire_video(struct mxr_device *mdev,
	struct mxr_output_conf *output_conf, int output_count)
{
	struct device *dev = mdev->dev;
	struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
	int i;
	int ret = 0;
	struct v4l2_subdev *sd;

	strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
	/* prepare context for V4L2 device */
	ret = v4l2_device_register(dev, v4l2_dev);
	if (ret) {
		mxr_err(mdev, "could not register v4l2 device.\n");
		goto fail;
	}

	mdev->alloc_ctx = vb2_dma_contig_init_ctx(mdev->dev);
	if (IS_ERR_OR_NULL(mdev->alloc_ctx)) {
		mxr_err(mdev, "could not acquire vb2 allocator\n");
		/* propagate an error instead of returning 0 on this path */
		ret = mdev->alloc_ctx ? PTR_ERR(mdev->alloc_ctx) : -ENOMEM;
		goto fail_v4l2_dev;
	}

	/* registering outputs */
	mdev->output_cnt = 0;
	for (i = 0; i < output_count; ++i) {
		struct mxr_output_conf *conf = &output_conf[i];
		struct mxr_output *out;

		sd = find_and_register_subdev(mdev, conf->module_name);
		/* trying to register next output */
		if (sd == NULL)
			continue;
		out = kzalloc(sizeof(*out), GFP_KERNEL);
		if (out == NULL) {
			mxr_err(mdev, "no memory for '%s'\n",
				conf->output_name);
			ret = -ENOMEM;
			/* registered subdevs are removed in fail_v4l2_dev */
			goto fail_output;
		}
		strlcpy(out->name, conf->output_name, sizeof(out->name));
		out->sd = sd;
		out->cookie = conf->cookie;
		mdev->output[mdev->output_cnt++] = out;
		mxr_info(mdev, "added output '%s' from module '%s'\n",
			conf->output_name, conf->module_name);
		/* checking if maximal number of outputs is reached */
		if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
			break;
	}

	if (mdev->output_cnt == 0) {
		mxr_err(mdev, "failed to register any output\n");
		ret = -ENODEV;
		/* skipping fail_output because there is nothing to free */
		goto fail_vb2_allocator;
	}

	return 0;

fail_output:
	/* kfree is NULL-safe */
	for (i = 0; i < mdev->output_cnt; ++i)
		kfree(mdev->output[i]);
	memset(mdev->output, 0, sizeof(mdev->output));

fail_vb2_allocator:
	/* freeing allocator context */
	vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);

fail_v4l2_dev:
	/* NOTE: automatically unregister all subdevs */
	v4l2_device_unregister(v4l2_dev);

fail:
	return ret;
}

void mxr_release_video(struct mxr_device *mdev)
{
	int i;

	/* kfree is NULL-safe */
	for (i = 0; i < mdev->output_cnt; ++i)
		kfree(mdev->output[i]);

	vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
	v4l2_device_unregister(&mdev->v4l2_dev);
}

static int mxr_querycap(struct file *file, void *priv,
	struct v4l2_capability *cap)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof(cap->driver));
	strlcpy(cap->card, layer->vfd.name, sizeof(cap->card));
	sprintf(cap->bus_info, "%d", layer->idx);
	cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;

	return 0;
}

static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
{
	mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
		geo->src.full_width, geo->src.full_height);
	mxr_dbg(mdev, "src.size = (%u, %u)\n",
		geo->src.width, geo->src.height);
	mxr_dbg(mdev, "src.offset = (%u, %u)\n",
		geo->src.x_offset, geo->src.y_offset);
	mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
		geo->dst.full_width, geo->dst.full_height);
	mxr_dbg(mdev, "dst.size = (%u, %u)\n",
		geo->dst.width, geo->dst.height);
	mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
		geo->dst.x_offset, geo->dst.y_offset);
	mxr_dbg(mdev, "ratio = (%u, %u)\n",
		geo->x_ratio, geo->y_ratio);
}

static void mxr_layer_default_geo(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	memset(&layer->geo, 0, sizeof(layer->geo));

	mxr_get_mbus_fmt(mdev, &mbus_fmt);

	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.width = layer->geo.dst.full_width;
	layer->geo.dst.height = layer->geo.dst.full_height;
	layer->geo.dst.field = mbus_fmt.field;

	layer->geo.src.full_width = mbus_fmt.width;
	layer->geo.src.full_height = mbus_fmt.height;
	layer->geo.src.width = layer->geo.src.full_width;
	layer->geo.src.height = layer->geo.src.full_height;

	mxr_geometry_dump(mdev, &layer->geo);
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
	mxr_geometry_dump(mdev, &layer->geo);
}

static void mxr_layer_update_output(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	mxr_get_mbus_fmt(mdev, &mbus_fmt);
	/* checking if update is needed */
	if (layer->geo.dst.full_width == mbus_fmt.width &&
		layer->geo.dst.full_height == mbus_fmt.height)
		return;

	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.field = mbus_fmt.field;
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);

	mxr_geometry_dump(mdev, &layer->geo);
}

static const struct mxr_format *find_format_by_fourcc(
	struct mxr_layer *layer, unsigned long fourcc);
static const struct mxr_format *find_format_by_index(
	struct mxr_layer *layer, unsigned long index);

static int mxr_enum_fmt(struct file *file, void *priv,
	struct v4l2_fmtdesc *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	const struct mxr_format *fmt;

	mxr_dbg(mdev, "%s\n", __func__);
	fmt = find_format_by_index(layer, f->index);
	if (fmt == NULL)
		return -EINVAL;

	strlcpy(f->description, fmt->name, sizeof(f->description));
	f->pixelformat = fmt->fourcc;

	return 0;
}

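/* round-up integer division, e.g. divup(1024, 32) == 32 and divup(100, 32) == 4 */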
static unsigned int divup(unsigned int dividend, unsigned int divisor)
{
	return (dividend + divisor - 1) / divisor;
}

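/*
 * Size of a single plane: both dimensions are rounded up to a whole number
 * of mxr_block units before multiplying by the block size, so tiled
 * formats (e.g. NV12MT with its 64x32-pixel macroblocks) get the padding
 * the hardware presumably expects.
 */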
unsigned long mxr_get_plane_size(const struct mxr_block *blk,
	unsigned int width, unsigned int height)
{
	unsigned int bl_width = divup(width, blk->width);
	unsigned int bl_height = divup(height, blk->height);

	return bl_width * bl_height * blk->size;
}

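/*
 * Fill the v4l2 plane format array for the given resolution.  Several
 * colour planes may share one memory buffer ("subframe"); plane2subframe[]
 * maps each plane to its subframe, sizeimage accumulates the sizes of all
 * planes stored in that buffer and bytesperline keeps the largest stride.
 */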
static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
	const struct mxr_format *fmt, u32 width, u32 height)
{
	int i;

	/* checking if nothing to fill */
	if (!planes)
		return;

	memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
	for (i = 0; i < fmt->num_planes; ++i) {
		struct v4l2_plane_pix_format *plane = planes
			+ fmt->plane2subframe[i];
		const struct mxr_block *blk = &fmt->plane[i];
		u32 bl_width = divup(width, blk->width);
		u32 bl_height = divup(height, blk->height);
		u32 sizeimage = bl_width * bl_height * blk->size;
		u16 bytesperline = bl_width * blk->size / blk->height;

		plane->sizeimage += sizeimage;
		plane->bytesperline = max(plane->bytesperline, bytesperline);
	}
}

static int mxr_g_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	pix->width = layer->geo.src.full_width;
	pix->height = layer->geo.src.full_height;
	pix->field = V4L2_FIELD_NONE;
	pix->pixelformat = layer->fmt->fourcc;
	pix->colorspace = layer->fmt->colorspace;
	mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);

	return 0;
}

static int mxr_s_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	const struct mxr_format *fmt;
	struct v4l2_pix_format_mplane *pix;
	struct mxr_device *mdev = layer->mdev;
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);

	pix = &f->fmt.pix_mp;
	fmt = find_format_by_fourcc(layer, pix->pixelformat);
	if (fmt == NULL) {
		mxr_warn(mdev, "unrecognized fourcc: %08x\n",
			pix->pixelformat);
		return -EINVAL;
	}
	layer->fmt = fmt;
	/* set source size to highest accepted value */
	geo->src.full_width = max(geo->dst.full_width, pix->width);
	geo->src.full_height = max(geo->dst.full_height, pix->height);
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
	mxr_geometry_dump(mdev, &layer->geo);
	/* set cropping to total visible screen */
	geo->src.width = pix->width;
	geo->src.height = pix->height;
	geo->src.x_offset = 0;
	geo->src.y_offset = 0;
	/* assure consistency of geometry */
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
	mxr_geometry_dump(mdev, &layer->geo);
	/* set full size to lowest possible value */
	geo->src.full_width = 0;
	geo->src.full_height = 0;
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
	mxr_geometry_dump(mdev, &layer->geo);

	/* returning results */
	mxr_g_fmt(file, priv, f);

	return 0;
}

static int mxr_g_selection(struct file *file, void *fh,
	struct v4l2_selection *s)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
		s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return -EINVAL;

	switch (s->target) {
	case V4L2_SEL_TGT_CROP:
		s->r.left = geo->src.x_offset;
		s->r.top = geo->src.y_offset;
		s->r.width = geo->src.width;
		s->r.height = geo->src.height;
		break;
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = geo->src.full_width;
		s->r.height = geo->src.full_height;
		break;
	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_COMPOSE_PADDED:
		s->r.left = geo->dst.x_offset;
		s->r.top = geo->dst.y_offset;
		s->r.width = geo->dst.width;
		s->r.height = geo->dst.height;
		break;
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = geo->dst.full_width;
		s->r.height = geo->dst.full_height;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* returns 1 if rectangle 'a' is inside 'b' */
static int mxr_is_rect_inside(struct v4l2_rect *a, struct v4l2_rect *b)
{
	if (a->left < b->left)
		return 0;
	if (a->top < b->top)
		return 0;
	if (a->left + a->width > b->left + b->width)
		return 0;
	if (a->top + a->height > b->top + b->height)
		return 0;
	return 1;
}

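/*
 * VIDIOC_S_SELECTION handler: apply the requested rectangle to the crop
 * (source) or compose (destination) part of the layer geometry, let
 * fix_geometry() round it to what the hardware supports, then verify the
 * result against the V4L2_SEL_FLAG_LE/GE constraint flags.  On constraint
 * failure the previous geometry is restored and -ERANGE returned;
 * otherwise the adjusted rectangle is passed back to user space in s->r.
 */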
static int mxr_s_selection(struct file *file, void *fh,
	struct v4l2_selection *s)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_geometry *geo = &layer->geo;
	struct mxr_crop *target = NULL;
	enum mxr_geometry_stage stage;
	struct mxr_geometry tmp;
	struct v4l2_rect res;

	memset(&res, 0, sizeof(res));

	mxr_dbg(layer->mdev, "%s: rect: %dx%d@%d,%d\n", __func__,
		s->r.width, s->r.height, s->r.left, s->r.top);

	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
		s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return -EINVAL;

	switch (s->target) {
	/* ignore read-only targets */
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		res.width = geo->src.full_width;
		res.height = geo->src.full_height;
		break;

	/* ignore read-only targets */
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		res.width = geo->dst.full_width;
		res.height = geo->dst.full_height;
		break;

	case V4L2_SEL_TGT_CROP:
		target = &geo->src;
		stage = MXR_GEOMETRY_CROP;
		break;
	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_COMPOSE_PADDED:
		target = &geo->dst;
		stage = MXR_GEOMETRY_COMPOSE;
		break;
	default:
		return -EINVAL;
	}
	/* apply change and update geometry if needed */
	if (target) {
		/* backup current geometry if setup fails */
		memcpy(&tmp, geo, sizeof(tmp));

		/* apply requested selection */
		target->x_offset = s->r.left;
		target->y_offset = s->r.top;
		target->width = s->r.width;
		target->height = s->r.height;

		layer->ops.fix_geometry(layer, stage, s->flags);

		/* retrieve the updated selection rectangle */
		res.left = target->x_offset;
		res.top = target->y_offset;
		res.width = target->width;
		res.height = target->height;

		mxr_geometry_dump(layer->mdev, &layer->geo);
	}

	/* checking if the rectangle satisfies constraints */
	if ((s->flags & V4L2_SEL_FLAG_LE) && !mxr_is_rect_inside(&res, &s->r))
		goto fail;
	if ((s->flags & V4L2_SEL_FLAG_GE) && !mxr_is_rect_inside(&s->r, &res))
		goto fail;

	/* return result rectangle */
	s->r = res;

	return 0;
fail:
	/* restore old geometry, which is not touched if target is NULL */
	if (target)
		memcpy(geo, &tmp, sizeof(tmp));
	return -ERANGE;
}

static int mxr_enum_dv_presets(struct file *file, void *fh,
	struct v4l2_dv_enum_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, enum_dv_presets, preset);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_s_dv_preset(struct file *file, void *fh,
	struct v4l2_dv_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* preset change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_preset, preset);

	mutex_unlock(&mdev->mutex);

	mxr_layer_update_output(layer);

	/* any failure should return EINVAL according to V4L2 doc */
	return ret ? -EINVAL : 0;
}

static int mxr_g_dv_preset(struct file *file, void *fh,
	struct v4l2_dv_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_preset, preset);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_s_std(struct file *file, void *fh, v4l2_std_id *norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* standard change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, *norm);

	mutex_unlock(&mdev->mutex);

	mxr_layer_update_output(layer);

	return ret ? -EINVAL : 0;
}

static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	struct mxr_output *out;
	struct v4l2_subdev *sd;

	if (a->index >= mdev->output_cnt)
		return -EINVAL;
	out = mdev->output[a->index];
	BUG_ON(out == NULL);
	sd = out->sd;
	strlcpy(a->name, out->name, sizeof(a->name));

	/* try to obtain supported tv norms */
	v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
	a->capabilities = 0;
	if (sd->ops->video && sd->ops->video->s_dv_preset)
		a->capabilities |= V4L2_OUT_CAP_PRESETS;
	if (sd->ops->video && sd->ops->video->s_std_output)
		a->capabilities |= V4L2_OUT_CAP_STD;
	a->type = V4L2_OUTPUT_TYPE_ANALOG;

	return 0;
}

static int mxr_s_output(struct file *file, void *fh, unsigned int i)
{
	struct video_device *vfd = video_devdata(file);
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;

	if (i >= mdev->output_cnt || mdev->output[i] == NULL)
		return -EINVAL;

	mutex_lock(&mdev->mutex);
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}
	mdev->current_output = i;
	vfd->tvnorms = 0;
	v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
		&vfd->tvnorms);
	mutex_unlock(&mdev->mutex);

	/* update the layer's geometry */
	mxr_layer_update_output(layer);

	mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);

	return 0;
}

static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;

	mutex_lock(&mdev->mutex);
	*p = mdev->current_output;
	mutex_unlock(&mdev->mutex);

	return 0;
}

static int mxr_reqbufs(struct file *file, void *priv,
	struct v4l2_requestbuffers *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_reqbufs(&layer->vb_queue, p);
}

static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_querybuf(&layer->vb_queue, p);
}

static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
	return vb2_qbuf(&layer->vb_queue, p);
}

static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
}

static int mxr_expbuf(struct file *file, void *priv,
	struct v4l2_exportbuffer *eb)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_expbuf(&layer->vb_queue, eb);
}

static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamon(&layer->vb_queue, i);
}

static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamoff(&layer->vb_queue, i);
}

static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
	.vidioc_querycap = mxr_querycap,
	/* format handling */
	.vidioc_enum_fmt_vid_out_mplane = mxr_enum_fmt,
	.vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
	.vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
	/* buffer control */
	.vidioc_reqbufs = mxr_reqbufs,
	.vidioc_querybuf = mxr_querybuf,
	.vidioc_qbuf = mxr_qbuf,
	.vidioc_dqbuf = mxr_dqbuf,
	.vidioc_expbuf = mxr_expbuf,
	/* Streaming control */
	.vidioc_streamon = mxr_streamon,
	.vidioc_streamoff = mxr_streamoff,
	/* Preset functions */
	.vidioc_enum_dv_presets = mxr_enum_dv_presets,
	.vidioc_s_dv_preset = mxr_s_dv_preset,
	.vidioc_g_dv_preset = mxr_g_dv_preset,
	/* analog TV standard functions */
	.vidioc_s_std = mxr_s_std,
	.vidioc_g_std = mxr_g_std,
	/* Output handling */
	.vidioc_enum_output = mxr_enum_output,
	.vidioc_s_output = mxr_s_output,
	.vidioc_g_output = mxr_g_output,
	/* selection ioctls */
	.vidioc_g_selection = mxr_g_selection,
	.vidioc_s_selection = mxr_s_selection,
};

static int mxr_video_open(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret = 0;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
	if (mutex_lock_interruptible(&layer->mutex))
		return -ERESTARTSYS;
	/* assure device probe is finished */
	wait_for_device_probe();
	/* creating context for file descriptor */
	ret = v4l2_fh_open(file);
	if (ret) {
		mxr_err(mdev, "v4l2_fh_open failed\n");
		goto unlock;
	}

	/* leaving if layer is already initialized */
	if (!v4l2_fh_is_singular_file(file))
		goto unlock;

	/* FIXME: should power be enabled on open? */
	ret = mxr_power_get(mdev);
	if (ret) {
		mxr_err(mdev, "power on failed\n");
		goto fail_fh_open;
	}

	ret = vb2_queue_init(&layer->vb_queue);
	if (ret != 0) {
		mxr_err(mdev, "failed to initialize vb2 queue\n");
		goto fail_power;
	}
	/* set default format, first on the list */
	layer->fmt = layer->fmt_array[0];
	/* setup default geometry */
	mxr_layer_default_geo(layer);
	mutex_unlock(&layer->mutex);

	return 0;

fail_power:
	mxr_power_put(mdev);

fail_fh_open:
	v4l2_fh_release(file);

unlock:
	mutex_unlock(&layer->mutex);

	return ret;
}

static unsigned int
mxr_video_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mxr_layer *layer = video_drvdata(file);
	unsigned int res;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	mutex_lock(&layer->mutex);
	res = vb2_poll(&layer->vb_queue, file, wait);
	mutex_unlock(&layer->mutex);
	return res;
}

static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mxr_layer *layer = video_drvdata(file);
	int ret;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	if (mutex_lock_interruptible(&layer->mutex))
		return -ERESTARTSYS;
	ret = vb2_mmap(&layer->vb_queue, vma);
	mutex_unlock(&layer->mutex);
	return ret;
}

static int mxr_video_release(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	mutex_lock(&layer->mutex);
	if (v4l2_fh_is_singular_file(file)) {
		vb2_queue_release(&layer->vb_queue);
		mxr_power_put(layer->mdev);
	}
	v4l2_fh_release(file);
	mutex_unlock(&layer->mutex);
	return 0;
}

static const struct v4l2_file_operations mxr_fops = {
	.owner = THIS_MODULE,
	.open = mxr_video_open,
	.poll = mxr_video_poll,
	.mmap = mxr_video_mmap,
	.release = mxr_video_release,
	.unlocked_ioctl = video_ioctl2,
};

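/*
 * vb2 queue_setup callback: report how many memory buffers (subframes) one
 * frame of the currently set format needs and how large each must be,
 * based on the full source geometry; vb2 then allocates them from the
 * mixer's dma-contig context.
 */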
static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
	unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[],
	void *alloc_ctxs[])
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	const struct mxr_format *fmt = layer->fmt;
	int i;
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_plane_pix_format planes[3];

	mxr_dbg(mdev, "%s\n", __func__);
	/* checking if format was configured */
	if (fmt == NULL)
		return -EINVAL;
	mxr_dbg(mdev, "fmt = %s\n", fmt->name);
	mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
		layer->geo.src.full_height);

	*nplanes = fmt->num_subframes;
	for (i = 0; i < fmt->num_subframes; ++i) {
		alloc_ctxs[i] = layer->mdev->alloc_ctx;
		sizes[i] = planes[i].sizeimage;
		mxr_dbg(mdev, "size[%d] = %08x\n", i, sizes[i]);
	}

	if (*nbuffers == 0)
		*nbuffers = 1;

	return 0;
}

static void buf_queue(struct vb2_buffer *vb)
{
	struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb);
	struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	spin_lock_irqsave(&layer->enq_slock, flags);
	list_add_tail(&buffer->list, &layer->enq_list);
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	mxr_dbg(mdev, "queuing buffer\n");
}

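/*
 * vb2 calls wait_prepare/wait_finish around blocking waits for buffers, so
 * the layer mutex taken in the ioctl path is dropped for the duration of
 * the wait (.wait_prepare = wait_unlock) and re-acquired afterwards
 * (.wait_finish = wait_lock), keeping a blocking DQBUF from stalling other
 * ioctls.
 */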
static void wait_lock(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);

	mxr_dbg(layer->mdev, "%s\n", __func__);
	mutex_lock(&layer->mutex);
}

static void wait_unlock(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);

	mxr_dbg(layer->mdev, "%s\n", __func__);
	mutex_unlock(&layer->mutex);
}

static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_dbg(mdev, "%s\n", __func__);

	if (count == 0) {
		mxr_dbg(mdev, "no output buffers queued\n");
		return -EINVAL;
	}

	/* block any changes in output configuration */
	mxr_output_get(mdev);

	mxr_layer_update_output(layer);
	layer->ops.format_set(layer);
	/* enabling layer in hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_STREAMING;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	layer->ops.stream_set(layer, MXR_ENABLE);
	mxr_streamer_get(mdev);

	return 0;
}

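/*
 * Safety net armed by stop_streaming(): if the final vsync never returns
 * the buffers still owned by the hardware, this timer forces them to the
 * ERROR state so that vb2_wait_for_all_buffers() cannot block forever.
 */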
static void mxr_watchdog(unsigned long arg)
{
	struct mxr_layer *layer = (struct mxr_layer *) arg;
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);

	spin_lock_irqsave(&layer->enq_slock, flags);

	if (layer->update_buf == layer->shadow_buf)
		layer->update_buf = NULL;
	if (layer->update_buf) {
		vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR);
		layer->update_buf = NULL;
	}
	if (layer->shadow_buf) {
		vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR);
		layer->shadow_buf = NULL;
	}
	spin_unlock_irqrestore(&layer->enq_slock, flags);
}

static int stop_streaming(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;
	struct timer_list watchdog;
	struct mxr_buffer *buf, *buf_tmp;

	mxr_dbg(mdev, "%s\n", __func__);

	spin_lock_irqsave(&layer->enq_slock, flags);

	/* reset list */
	layer->state = MXR_LAYER_STREAMING_FINISH;

	/* set all buffers to the done state */
	list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* give 1 second to complete the last buffers */
	setup_timer_on_stack(&watchdog, mxr_watchdog,
		(unsigned long)layer);
	mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));

	/* wait until all buffers reach the done state */
	vb2_wait_for_all_buffers(vq);

	/* stop timer if all synchronization is done */
	del_timer_sync(&watchdog);
	destroy_timer_on_stack(&watchdog);

	/* stopping hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_IDLE;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* disabling layer in hardware */
	layer->ops.stream_set(layer, MXR_DISABLE);
	/* remove one streamer */
	mxr_streamer_put(mdev);
	/* allow changes in output configuration */
	mxr_output_put(mdev);
	return 0;
}

static struct vb2_ops mxr_video_qops = {
	.queue_setup = queue_setup,
	.buf_queue = buf_queue,
	.wait_prepare = wait_unlock,
	.wait_finish = wait_lock,
	.start_streaming = start_streaming,
	.stop_streaming = stop_streaming,
};

/* FIXME: try to move these functions into mxr_base_layer_create */
int mxr_base_layer_register(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	int ret;

	ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
	if (ret)
		mxr_err(mdev, "failed to register video device\n");
	else
		mxr_info(mdev, "registered layer %s as /dev/video%d\n",
			layer->vfd.name, layer->vfd.num);
	return ret;
}

void mxr_base_layer_unregister(struct mxr_layer *layer)
{
	video_unregister_device(&layer->vfd);
}

void mxr_layer_release(struct mxr_layer *layer)
{
	if (layer->ops.release)
		layer->ops.release(layer);
}

void mxr_base_layer_release(struct mxr_layer *layer)
{
	kfree(layer);
}

static void mxr_vfd_release(struct video_device *vdev)
{
	pr_info("video device release\n");
}

struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
	int idx, char *name, struct mxr_layer_ops *ops)
{
	struct mxr_layer *layer;

	layer = kzalloc(sizeof(*layer), GFP_KERNEL);
	if (layer == NULL) {
		mxr_err(mdev, "not enough memory for layer.\n");
		goto fail;
	}

	layer->mdev = mdev;
	layer->idx = idx;
	layer->ops = *ops;

	spin_lock_init(&layer->enq_slock);
	INIT_LIST_HEAD(&layer->enq_list);
	mutex_init(&layer->mutex);

	layer->vfd = (struct video_device) {
		.minor = -1,
		.release = mxr_vfd_release,
		.fops = &mxr_fops,
		.vfl_dir = VFL_DIR_TX,
		.ioctl_ops = &mxr_ioctl_ops,
	};
	strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));
	/* let framework control PRIORITY */
	set_bit(V4L2_FL_USE_FH_PRIO, &layer->vfd.flags);

	video_set_drvdata(&layer->vfd, layer);
	layer->vfd.lock = &layer->mutex;
	layer->vfd.v4l2_dev = &mdev->v4l2_dev;

	layer->vb_queue = (struct vb2_queue) {
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
		.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF,
		.drv_priv = layer,
		.buf_struct_size = sizeof(struct mxr_buffer),
		.ops = &mxr_video_qops,
		.mem_ops = &vb2_dma_contig_memops,
	};

	return layer;

fail:
	return NULL;
}

static const struct mxr_format *find_format_by_fourcc(
	struct mxr_layer *layer, unsigned long fourcc)
{
	int i;

	for (i = 0; i < layer->fmt_array_size; ++i)
		if (layer->fmt_array[i]->fourcc == fourcc)
			return layer->fmt_array[i];
	return NULL;
}

static const struct mxr_format *find_format_by_index(
	struct mxr_layer *layer, unsigned long index)
{
	if (index >= layer->fmt_array_size)
		return NULL;
	return layer->fmt_array[index];
}