/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf2.
 *
 * Helper functions for devices that use videobuf2 buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
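
/*
 * Typical usage of this framework (a minimal sketch, not a complete
 * driver; the my_* names below are hypothetical driver-side helpers):
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_abort	= my_job_abort,
 *	};
 *
 *	probe:	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	open:	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
 *						    my_queue_init);
 *
 * Once my_device_run() has finished a transaction (e.g. from the driver's
 * interrupt handler), the driver calls v4l2_m2m_job_finish() so that the
 * next queued job can be scheduled.
 */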
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)
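
/*
 * Example (illustrative offset value): a CAPTURE buffer that vb2 places at
 * mmap offset 0x1000 is reported to userspace at 0x1000 + DST_QUEUE_OFF_BASE
 * by v4l2_m2m_querybuf(); v4l2_m2m_mmap() strips the base again before
 * calling vb2_mmap().
 */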

/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

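/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added
 * to the pending job queue and add it if so
 *
 * An instance is only scheduled when both its queues are streaming, it is
 * not aborting or already queued, each ready queue holds a buffer (unless
 * the queue is marked buffered), and the optional job_ready() callback, if
 * provided, reports the job as ready.
 */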
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags_out, flags_cap;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Aborted context\n");
		return;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
	    && !m2m_ctx->out_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
	    && !m2m_ctx->cap_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
					flags_cap);
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

	if (m2m_dev->m2m_ops->job_ready
		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 *
 * Called on streamoff or release of a context:
 * 1] If the context is currently running, job_abort() is called and we wait
 *    until the job completes.
 * 2] If the context is queued, it is removed from the job_queue.
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
				!(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

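/**
 * v4l2_m2m_job_finish() - inform the framework that a job has finished
 *
 * Drivers call this once the transaction started by device_run() has
 * completed, typically from their interrupt or completion handler. The
 * instance is removed from the job queue, waiters in v4l2_m2m_cancel_job()
 * are woken up, and the next job, if any, is scheduled.
 */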
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);

int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and is no
	 * longer the owner of the queue. Otherwise we have an owner. */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_prepare_buf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

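/**
 * v4l2_m2m_poll() - poll replacement for m2m devices
 *
 * Call from the driver's poll() function. Polls both queues: a finished
 * source buffer is reported as POLLOUT | POLLWRNORM, a finished destination
 * buffer as POLLIN | POLLRDNORM, and a pending event as POLLPRI.
 */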
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list,
	 * which means it is either in the driver already or waiting for the
	 * driver to claim it and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= POLLERR;
		goto end;
	}

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (list_empty(&src_q->done_list))
		poll_wait(file, &src_q->done_wq, wait);
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (list_empty(&dst_q->done_list)) {
		/*
		 * If the last buffer was dequeued from the capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (dst_q->last_buffer_dequeued) {
			spin_unlock_irqrestore(&dst_q->done_lock, flags);
			return rc | POLLIN | POLLRDNORM;
		}

		poll_wait(file, &dst_q->done_wq, wait);
	}
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
						done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
			|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
						done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
			|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

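	/* The offset encodes which of the two queues the buffer belongs to */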
	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
			WARN_ON(!m2m_ops->job_abort))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

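/**
 * v4l2_m2m_ctx_init() - allocate and initialize an m2m context
 *
 * The supplied queue_init() callback must initialize both vb2 queues (source
 * and destination). Returns an ERR_PTR() on failure; usually called from the
 * driver's open() function.
 */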
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

	if (ret)
		goto err;
	/*
	 * If both queues use the same mutex, assign it as the common buffer
	 * queues lock to the m2m context. This lock is used in the
	 * v4l2_m2m_ioctl_* helpers.
	 */
	if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
		m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);

void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

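/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list
 *
 * Typically called from the driver's vb2 buf_queue() operation; the buffer
 * is added to the ready queue of its source or destination context and will
 * be handed to the device when a job for this context runs.
 */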
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
		struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
				struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
			       struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
				struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queues.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	unsigned int ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);