/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf2.
 *
 * Helper functions for devices that use videobuf2 buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)


/*
 * Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues.
 */
#define DST_QUEUE_OFF_BASE	(1 << 30)
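
/*
 * A worked example (hypothetical offsets, for illustration only): if vb2
 * assigns mmap offset 0x1000 to buffer 0 on both queues, userspace sees
 * 0x1000 for the OUTPUT buffer and DST_QUEUE_OFF_BASE + 0x1000 for the
 * CAPTURE buffer; v4l2_m2m_mmap() subtracts the base again before calling
 * vb2_mmap().
 */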

/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	struct v4l2_m2m_ops	*m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

/**
 * v4l2_m2m_buf_remove() - remove a buffer from the list of ready buffers and
 * return it
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

/*
 * Scheduling handlers
 */

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so
 * @m2m_ctx: m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * There may also be additional, custom requirements. In such a case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that returns 1
 * if the instance is ready; see the sketch after this function. An example
 * could be an instance that requires more than one src/dst buffer per
 * transaction.
 */
static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);

	if (m2m_dev->m2m_ops->job_ready
	    && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}
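
/*
 * A minimal job_ready sketch (hypothetical driver context "struct my_ctx";
 * an assumption, not part of this framework): an instance that needs two
 * source buffers per transaction could be gated like this:
 *
 *	static int my_job_ready(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		return v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) >= 2;
 *	}
 */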

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the device_run() callback has been
 * called on the driver. To prevent recursion, it should not be called directly
 * from the device_run() callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/*
	 * This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes.
	 */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
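
/*
 * A minimal completion sketch (hypothetical names my_dev, my_irq_handler and
 * done_state(); assumptions for illustration): drivers typically call
 * v4l2_m2m_job_finish() from the interrupt handler, after marking both
 * buffers of the finished transaction as done:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		enum vb2_buffer_state state = done_state(dev);
 *
 *		vb2_buffer_done(v4l2_m2m_src_buf_remove(ctx->m2m_ctx), state);
 *		vb2_buffer_done(v4l2_m2m_dst_buf_remove(ctx->m2m_ctx), state);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
 *
 *		return IRQ_HANDLED;
 *	}
 */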

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	return vb2_reqbufs(vq, reqbufs);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
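
/*
 * A minimal ioctl-op sketch (hypothetical vidioc_qbuf wrapper with the m2m
 * context reachable from the file handle; an assumption for illustration):
 *
 *	static int vidioc_qbuf(struct file *file, void *priv,
 *			       struct v4l2_buffer *buf)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
 *	}
 */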

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	return vb2_streamoff(vq, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this indicates
 * that a non-blocking write can be performed, while a readable state is
 * signalled for the destination queue.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
	    && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= POLLERR;
		goto end;
	}

	if (m2m_ctx->m2m_dev->m2m_ops->unlock)
		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);

	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);

	if (m2m_ctx->m2m_dev->m2m_ops->lock)
		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
					  done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
		       || src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
					  done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
		       || dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
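
/*
 * A minimal file-operations sketch (hypothetical struct my_ctx holding the
 * m2m context; an assumption for illustration):
 *
 *	static unsigned int my_poll(struct file *file,
 *				    struct poll_table_struct *wait)
 *	{
 *		struct my_ctx *ctx = file->private_data;
 *
 *		return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
 *	}
 */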

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for videobuf2, which will receive normal per-queue offsets and
 * proper vb2 queue pointers. The differentiation is made outside videobuf2
 * by adding a predefined offset to buffers from one of the queues and
 * subtracting it before passing it back to videobuf2. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from driver's probe() function.
 */
struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
			WARN_ON(!m2m_ops->job_abort))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof(*m2m_dev), GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
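
/*
 * A minimal probe-time sketch (hypothetical my_m2m_ops callbacks and a
 * driver-private struct my_dev; assumptions for illustration):
 *
 *	static struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_ready	= my_job_ready,
 *		.job_abort	= my_job_abort,
 *	};
 *
 *	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */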

/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * Usually called from driver's remove() function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 * @m2m_dev: a previously initialized m2m_dev struct
 * @drv_priv: driver's instance private data
 * @queue_init: a callback for queue type-specific initialization function to
 * be used for initializing vb2_queues
 *
 * Usually called from driver's open() function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof(*m2m_ctx), GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
	if (ret)
		goto err;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
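
/*
 * A minimal open()-time sketch (hypothetical my_queue_init() that fills in
 * the source and destination struct vb2_queue; assumptions for
 * illustration):
 *
 *	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 *	if (IS_ERR(ctx->m2m_ctx)) {
 *		int ret = PTR_ERR(ctx->m2m_ctx);
 *
 *		kfree(ctx);
 *		return ret;
 *	}
 */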

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list
 *
 * Call from buf_queue(), a vb2_ops callback.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
{
	struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
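
/*
 * A minimal vb2_ops buf_queue sketch (hypothetical struct my_ctx stored as
 * the queue's driver private data; an assumption for illustration):
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
 *	}
 */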