/*
 * Memory-to-memory device framework for Video for Linux 2.
 *
 * Helper functions for devices that use memory buffers for both source
 * and destination.
 *
 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version
 */

#ifndef _MEDIA_V4L2_MEM2MEM_H
#define _MEDIA_V4L2_MEM2MEM_H

#include <media/videobuf2-v4l2.h>

/**
 * struct v4l2_m2m_ops - mem-to-mem device driver callbacks
 * @device_run:	required. Begin the actual job (transaction) inside this
 *		callback.
 *		The job does NOT have to end before this callback returns
 *		(and it will be the usual case). When the job finishes,
 *		v4l2_m2m_job_finish() has to be called.
 * @job_ready:	optional. Should return 0 if the driver does not have a job
 *		fully prepared to run yet (i.e. it will not be able to finish
 *		a transaction without sleeping). If not provided, it will be
 *		assumed that one source and one destination buffer are all
 *		that is required for the driver to perform one full transaction.
 *		This method may not sleep.
 * @job_abort:	required. Informs the driver that it has to abort the currently
 *		running transaction as soon as possible (i.e. as soon as it can
 *		stop the device safely; e.g. in the next interrupt handler),
 *		even if the transaction would not have been finished by then.
 *		After the driver performs the necessary steps, it has to call
 *		v4l2_m2m_job_finish() (as if the transaction ended normally).
 *		This function does not have to (and will usually not) wait
 *		until the device enters a state when it can be stopped.
 * @lock:	optional. Define a driver's own lock callback, instead of using
 *		m2m_ctx->q_lock.
 * @unlock:	optional. Define a driver's own unlock callback, instead of
 *		using m2m_ctx->q_lock.
 */
struct v4l2_m2m_ops {
	void (*device_run)(void *priv);
	int (*job_ready)(void *priv);
	void (*job_abort)(void *priv);
	void (*lock)(void *priv);
	void (*unlock)(void *priv);
};
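
/*
 * A hedged sketch (not from the original header) of a driver's ops table;
 * the my_* handlers are hypothetical (a device_run body is sketched further
 * below, next to v4l2_m2m_next_src_buf()):
 *
 *	static void my_job_abort(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		// Ask the hardware to stop; once it has, the driver calls
 *		// v4l2_m2m_job_finish() as if the job ended normally.
 *		my_hw_request_stop(ctx);
 *	}
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_abort	= my_job_abort,
 *	};
 */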

struct v4l2_m2m_dev;

struct v4l2_m2m_queue_ctx {
/* private: internal use only */
	struct vb2_queue	q;

	/* Queue for buffers ready to be processed as soon as this
	 * instance receives access to the device */
	struct list_head	rdy_queue;
	spinlock_t		rdy_spinlock;
	u8			num_rdy;
	bool			buffered;
};

struct v4l2_m2m_ctx {
	/* optional cap/out vb2 queues lock */
	struct mutex			*q_lock;

/* private: internal use only */
	struct v4l2_m2m_dev		*m2m_dev;

	/* Capture (output to memory) queue context */
	struct v4l2_m2m_queue_ctx	cap_q_ctx;

	/* Output (input from memory) queue context */
	struct v4l2_m2m_queue_ctx	out_q_ctx;

	/* For device job queue */
	struct list_head		queue;
	unsigned long			job_flags;
	wait_queue_head_t		finished;

	/* Instance private data */
	void				*priv;
};

struct v4l2_m2m_buffer {
	struct vb2_v4l2_buffer	vb;
	struct list_head	list;
};

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type);

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so
 * @m2m_ctx: m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);
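
/*
 * A hedged usage note (not from the original header): a driver whose
 * job_ready() depends on state beyond queued buffers can call this to
 * re-evaluate scheduling once that state changes, e.g. (assuming the
 * driver keeps its m2m context in its v4l2_fh, and a hypothetical flag):
 *
 *	ctx->headers_parsed = true;
 *	v4l2_m2m_try_schedule(ctx->fh.m2m_ctx);
 */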

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the device_run() callback has
 * been called on the driver. To prevent recursion, however, it must not be
 * called directly from the device_run() callback itself.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx);
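
/*
 * A hedged sketch (not from the original header) of the usual finish path,
 * driven from the driver's interrupt handler. The my_* names are
 * hypothetical and the driver context is assumed to embed a struct v4l2_fh:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *		// Yield the device; the core may then run another queued job.
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */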

static inline void
v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
	vb2_buffer_done(&buf->vb2_buf, state);
}

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf);

/**
 * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending
 * on the type
 */
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type);

/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed, while a buffer ready
 * on the destination queue will indicate a non-blocking read.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from the driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for videobuf2, which will receive normal per-queue offsets and
 * proper vb2 queue pointers. The differentiation is made outside videobuf2
 * by adding a predefined offset to buffers from one of the queues and
 * subtracting it before passing it back to videobuf2. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma);

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from driver's probe() function.
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);
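
/*
 * A hedged sketch (not from the original header) of probe-time setup;
 * my_m2m_ops and the dev structure are hypothetical:
 *
 *	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */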

/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * Usually called from driver's remove() function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 * @m2m_dev: a previously initialized m2m_dev struct
 * @drv_priv: driver's instance private data
 * @queue_init: a callback for queue type-specific initialization function
 *	to be used for initializing vb2_queues
 *
 * Usually called from driver's open() function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq,
				  struct vb2_queue *dst_vq));
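
/*
 * A hedged sketch (not from the original header) of open()-time context
 * setup; the my_* names are hypothetical and most vb2_queue setup (ops,
 * mem_ops, buf_struct_size, lock, ...) is elided:
 *
 *	static int my_queue_init(void *priv, struct vb2_queue *src_vq,
 *				 struct vb2_queue *dst_vq)
 *	{
 *		struct my_ctx *ctx = priv;
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		src_vq->drv_priv = ctx;
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		dst_vq->drv_priv = ctx;
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 *	// In my_open(), after v4l2_fh_init(&ctx->fh, vfd):
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 *	if (IS_ERR(ctx->fh.m2m_ctx))
 *		return PTR_ERR(ctx->fh.m2m_ctx);
 */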

static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->out_q_ctx.buffered = buffered;
}

static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->cap_q_ctx.buffered = buffered;
}

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * Call from the driver's buf_queue(), vb2_ops callback.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf);
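
/*
 * A hedged sketch (not from the original header) of a driver's buf_queue
 * vb2_ops callback; struct my_ctx is hypothetical:
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 *	}
 */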

/**
 * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready
 * for use
 *
 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->out_q_ctx.num_rdy;
}

/**
 * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
 * ready for use
 *
 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->cap_q_ctx.num_rdy;
}

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
 * buffers
 *
 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
 */
static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
 */
static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx);
}
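
/*
 * A hedged sketch (not from the original header) of a device_run() picking
 * up the next ready pair without removing it from the ready lists; the
 * my_* names are hypothetical:
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *		// Program DMA addresses from src/dst and start the hardware;
 *		// the IRQ handler later removes the buffers and finishes.
 *		my_hw_run(ctx, &src->vb2_buf, &dst->vb2_buf);
 *	}
 */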

/**
 * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
 *
 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->out_q_ctx.q;
}

/**
 * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
 *
 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->cap_q_ctx.q;
}

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers
 * and return it
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
 * buffers and return it
 *
 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
 */
static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
 * ready buffers and return it
 *
 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
 */
static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
}

/* v4l2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
			   struct v4l2_requestbuffers *rb);
int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh,
			       struct v4l2_create_buffers *create);
int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh,
			    struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh,
			  struct v4l2_exportbuffer *eb);
int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh,
			struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh,
			 struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh,
			       struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
			    enum v4l2_buf_type type);
int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
			     enum v4l2_buf_type type);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
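
/*
 * A hedged sketch (not from the original header) of wiring these helpers
 * into a driver's ops tables. The helpers assume the driver stores its m2m
 * context in the v4l2_fh (fh->m2m_ctx), as in the earlier open() sketch:
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		// ...format ioctls elided...
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		// ...open/release elided...
 *		.mmap	= v4l2_m2m_fop_mmap,
 *		.poll	= v4l2_m2m_fop_poll,
 *	};
 */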

#endif /* _MEDIA_V4L2_MEM2MEM_H */