/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf2.
 *
 * Helper functions for devices that use videobuf2 buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf2");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)


/*
 * Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the
 * same offsets but for different queues.
 */
#define DST_QUEUE_OFF_BASE	(1 << 30)
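
/*
 * For example: with this base, a CAPTURE buffer whose vb2 mem_offset is
 * 0x1000 is reported by v4l2_m2m_querybuf() below as 0x40001000
 * (0x1000 + (1 << 30)), and v4l2_m2m_mmap() subtracts the base again
 * before handing the offset back to vb2. OUTPUT queue offsets are passed
 * through unchanged.
 */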


/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

/*
 * Scheduling handlers
 */

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 * @m2m_ctx: m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such a case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction.
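 *
 * A minimal sketch of such a job_ready callback (the driver context
 * struct my_ctx and the two-source-buffer requirement are hypothetical;
 * the context is assumed to embed a struct v4l2_fh named fh):
 *
 *	static int my_job_ready(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >= 2;
 *	}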
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags_out, flags_cap;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Aborted context\n");
		return;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
	    && !m2m_ctx->out_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
	    && !m2m_ctx->cap_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
					flags_cap);
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

	if (m2m_dev->m2m_ops->job_ready
		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then the job will be aborted and
 *    this function will wait for it to finish
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
				!(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the device_run() callback has been
 * called on the driver. To prevent recursion, it should not be called directly
 * from the device_run() callback though.
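 *
 * A typical finish path, e.g. in a driver's interrupt handler, might look
 * like this (a sketch; my_dev, ctx and the unconditional DONE state are
 * hypothetical):
 *
 *	struct vb2_buffer *src, *dst;
 *
 *	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *	vb2_buffer_done(src, VB2_BUF_STATE_DONE);
 *	vb2_buffer_done(dst, VB2_BUF_STATE_DONE);
 *	v4l2_m2m_job_finish(my_dev->m2m_dev, ctx->fh.m2m_ctx);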
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/*
	 * This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes.
	 */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	return vb2_reqbufs(vq, reqbufs);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/*
	 * Drop queue, since streamoff returns device to the same state as
	 * after calling reqbufs.
	 */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed, while a buffer ready
 * on the destination queue indicates that a non-blocking read can be
 * performed.
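 *
 * Drivers that serialize both queues with a single mutex can usually plug
 * v4l2_m2m_fop_poll() (defined below) into their v4l2_file_operations
 * instead of wrapping this helper themselves.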
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= POLLERR;
		goto end;
	}

	if (m2m_ctx->m2m_dev->m2m_ops->unlock)
		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
	else if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	if (list_empty(&src_q->done_list))
		poll_wait(file, &src_q->done_wq, wait);
	if (list_empty(&dst_q->done_list))
		poll_wait(file, &dst_q->done_wq, wait);

	if (m2m_ctx->m2m_dev->m2m_ops->lock)
		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
	else if (m2m_ctx->q_lock) {
		if (mutex_lock_interruptible(m2m_ctx->q_lock)) {
			rc |= POLLERR;
			goto end;
		}
	}

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
						done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
			|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
						done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
			|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for videobuf2, which will receive normal per-queue offsets and
 * proper videobuf2 queue pointers. The differentiation is made outside
 * videobuf2 by adding a predefined offset to buffers from one of the queues
 * and subtracting it before passing it back to videobuf2. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from driver's probe() function.
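 *
 * A minimal sketch of the setup (the my_* callbacks are hypothetical
 * driver code; the job_ready and lock/unlock ops are optional):
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_abort	= my_job_abort,
 *	};
 *
 *	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);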
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
			WARN_ON(!m2m_ops->job_abort))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof(*m2m_dev), GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

/**
 * v4l2_m2m_release() - cleans up and frees an m2m_dev structure
 *
 * Usually called from driver's remove() function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize an m2m context
 * @m2m_dev: a previously initialized m2m_dev struct
 * @drv_priv: driver's instance private data
 * @queue_init: a callback for queue type-specific initialization function to
 *	be used for initializing videobuf2 queues
 *
 * Usually called from driver's open() function.
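 *
 * A minimal sketch of the open() side (ctx, dev and my_queue_init are
 * hypothetical driver code; ctx is assumed to embed a struct v4l2_fh):
 *
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
 *					    my_queue_init);
 *	if (IS_ERR(ctx->fh.m2m_ctx))
 *		return PTR_ERR(ctx->fh.m2m_ctx);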
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq,
				  struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof(*m2m_ctx), GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

	if (ret)
		goto err;
	/*
	 * If both queues use the same mutex assign it as the common buffer
	 * queues lock to the m2m context. This lock is used in the
	 * v4l2_m2m_ioctl_* helpers.
	 */
	if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
		m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * Call from the vb2_ops buf_queue() callback.
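 *
 * A typical buf_queue callback is a thin wrapper around this helper (a
 * sketch; struct my_ctx with its embedded struct v4l2_fh is hypothetical):
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
 *	}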
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
{
	struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

/* Videobuf2 ioctl helpers */
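
/*
 * These helpers can be plugged straight into a driver's v4l2_ioctl_ops
 * when the m2m context is stored in the file handle's struct v4l2_fh, as
 * they assume below (a sketch; only the buffer-related ops are shown):
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 */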

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
				struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
				struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queues.
 */
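
/*
 * For such drivers, the file_operations boilerplate reduces to (a sketch;
 * my_open and my_release are hypothetical):
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */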

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	int ret;

	if (m2m_ctx->q_lock && mutex_lock_interruptible(m2m_ctx->q_lock))
		return -ERESTARTSYS;

	ret = v4l2_m2m_mmap(file, m2m_ctx, vma);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	unsigned int ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);