/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf2.
 *
 * Helper functions for devices that use videobuf2 buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf2");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)


/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
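
/*
 * A minimal device_run() sketch built on this helper: the inline wrappers
 * v4l2_m2m_next_src_buf() and v4l2_m2m_next_dst_buf() from v4l2-mem2mem.h
 * call v4l2_m2m_next_buf() on the respective queue context. The my_* names
 * are hypothetical:
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *		struct vb2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
 *
 *		my_hw_start_transaction(ctx, src, dst);
 *	}
 */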

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

/*
 * Scheduling handlers
 */

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 * @m2m_ctx: m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such a case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction, as sketched after the function below.
 */
static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags_out, flags_cap;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
	    && !m2m_ctx->out_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
	    && !m2m_ctx->cap_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
					flags_cap);
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

	if (m2m_dev->m2m_ops->job_ready
		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}

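/*
 * A minimal sketch of such a custom job_ready callback, for a hypothetical
 * device that needs two source buffers per transaction (my_ctx and
 * MY_SRC_BUFS_PER_JOB are made-up names):
 *
 *	static int my_job_ready(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		return v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx)
 *			>= MY_SRC_BUFS_PER_JOB;
 *	}
 */
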
/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then the job will be aborted via
 *    the job_abort callback and we will wait for it to complete;
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue.
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n",
			m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the device_run() callback has
 * been called on the driver. To prevent recursion, it should not be called
 * directly from the device_run() callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
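
/*
 * A sketch of the intended call site (my_* names are hypothetical): an
 * interrupt handler looks up the running instance, gives both buffers back
 * to vb2 and then yields the device to the framework:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *
 *		vb2_buffer_done(v4l2_m2m_src_buf_remove(ctx->m2m_ctx),
 *				VB2_BUF_STATE_DONE);
 *		vb2_buffer_done(v4l2_m2m_dst_buf_remove(ctx->m2m_ctx),
 *				VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */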

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	return vb2_reqbufs(vq, reqbufs);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
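
/*
 * This helper and the querybuf/qbuf/dqbuf/streamon/streamoff multiplexers
 * around it are meant to back a driver's v4l2_ioctl_ops almost one-to-one.
 * A sketch of such wiring (my_* and fh2ctx() are hypothetical):
 *
 *	static int my_ioctl_reqbufs(struct file *file, void *priv,
 *				    struct v4l2_requestbuffers *reqbufs)
 *	{
 *		return v4l2_m2m_reqbufs(file, fh2ctx(priv)->m2m_ctx, reqbufs);
 *	}
 *
 *	static int my_ioctl_qbuf(struct file *file, void *priv,
 *				 struct v4l2_buffer *buf)
 *	{
 *		return v4l2_m2m_qbuf(file, fh2ctx(priv)->m2m_ctx, buf);
 *	}
 */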

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

/**
 * v4l2_m2m_poll() - poll replacement, for the source and destination queues
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed (POLLOUT | POLLWRNORM),
 * while POLLIN | POLLRDNORM will be returned in case of the destination queue.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
	    && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= POLLERR;
		goto end;
	}

	if (m2m_ctx->m2m_dev->m2m_ops->unlock)
		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);

	if (list_empty(&src_q->done_list))
		poll_wait(file, &src_q->done_wq, wait);
	if (list_empty(&dst_q->done_list))
		poll_wait(file, &dst_q->done_wq, wait);

	if (m2m_ctx->m2m_dev->m2m_ops->lock)
		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
						done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
			|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
						done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
			|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for the videobuf2 layer, which will receive normal per-queue
 * offsets and proper vb2 queue pointers. The differentiation is made outside
 * videobuf2 by adding a predefined offset to buffers from one of the queues
 * and subtracting it before passing it back to videobuf2. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
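
/*
 * A sketch of how a driver typically delegates its v4l2_file_operations to
 * v4l2_m2m_poll() and v4l2_m2m_mmap() (my_* and fh2ctx() are hypothetical):
 *
 *	static unsigned int my_poll(struct file *file,
 *				    struct poll_table_struct *wait)
 *	{
 *		struct my_ctx *ctx = fh2ctx(file->private_data);
 *
 *		return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
 *	}
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_ctx *ctx = fh2ctx(file->private_data);
 *
 *		return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
 *	}
 */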

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from driver's probe() function.
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
			WARN_ON(!m2m_ops->job_abort))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
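
/*
 * A minimal probe-time sketch (my_* names are hypothetical). device_run and
 * job_abort are mandatory, as checked above; job_ready is optional:
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_ready	= my_job_ready,
 *		.job_abort	= my_job_abort,
 *	};
 *
 *	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */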

/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * Usually called from driver's remove() function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 * @m2m_dev: a previously initialized m2m_dev struct
 * @drv_priv: driver's instance private data
 * @queue_init: a callback for queue type-specific initialization function to
 * be used for initializing vb2_queues
 *
 * Usually called from driver's open() function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

	if (ret)
		goto err;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
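
/*
 * A minimal open-time sketch (my_* names are hypothetical); the queue_init
 * callback is expected to fill in both vb2_queues and register them:
 *
 *	static int my_queue_init(void *priv, struct vb2_queue *src_vq,
 *				 struct vb2_queue *dst_vq)
 *	{
 *		int ret;
 *
 *		(set up src_vq as the OUTPUT queue, dst_vq as CAPTURE)
 *
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 *	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 *	if (IS_ERR(ctx->m2m_ctx))
 *		return PTR_ERR(ctx->m2m_ctx);
 */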

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * Call from buf_queue(), vb2_ops callback.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
{
	struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

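/*
 * A minimal sketch of the intended call site in a driver's vb2_ops (my_ctx
 * is a hypothetical name):
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
 *	}
 */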