/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)


/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	struct v4l2_m2m_ops	*m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				       enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
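
/*
 * A minimal sketch of how a driver's device_run() callback might pick up the
 * next ready buffers via the v4l2_m2m_next_src_buf()/v4l2_m2m_next_dst_buf()
 * helpers from v4l2-mem2mem.h (wrappers around v4l2_m2m_next_buf() above).
 * The "mydrv_*" names and the ctx layout are hypothetical, not part of this
 * framework.
 *
 *	static void mydrv_device_run(void *priv)
 *	{
 *		struct mydrv_ctx *ctx = priv;
 *		struct vb2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
 *
 *		mydrv_program_hw(ctx, src, dst);
 *		mydrv_start_hw(ctx);
 *	}
 */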

/**
 * v4l2_m2m_buf_remove() - remove a buffer from the list of ready buffers and
 * return it
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

/*
 * Scheduling handlers
 */

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
				   struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 * @m2m_ctx:	m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * There may also be additional, custom requirements. In such a case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that returns 1
 * if the instance is ready to run.
 * An example would be an instance that requires more than one src/dst buffer
 * per transaction.
 */
static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);

	if (m2m_dev->m2m_ops->job_ready
		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the device_run() callback has been
 * called on the driver. To prevent recursion, it must not be called directly
 * from the device_run() callback itself, though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
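
/*
 * A minimal sketch of the completion path in a driver: once the hardware
 * signals that a transaction finished, the buffers are taken off the ready
 * lists, handed back to vb2, and the device is yielded with
 * v4l2_m2m_job_finish(). The "mydrv_*" names and the ctx/dev layout are
 * hypothetical; only the v4l2_m2m_*() and vb2_buffer_done() calls are real.
 *
 *	static irqreturn_t mydrv_irq(int irq, void *priv)
 *	{
 *		struct mydrv_dev *dev = priv;
 *		struct mydrv_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
 *
 *		vb2_buffer_done(src, VB2_BUF_STATE_DONE);
 *		vb2_buffer_done(dst, VB2_BUF_STATE_DONE);
 *
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */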

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	return vb2_reqbufs(vq, reqbufs);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	return vb2_streamoff(vq, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

/**
 * v4l2_m2m_poll() - poll replacement, for both source and destination queues
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed; likewise, a dequeueable
 * buffer on the destination queue will indicate that a non-blocking read can
 * be performed.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list,
	 * i.e. the buffer is either already owned by the driver or is
	 * waiting for the driver to claim it and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= POLLERR;
		goto end;
	}

	if (m2m_ctx->m2m_dev->m2m_ops->unlock)
		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);

	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);

	if (m2m_ctx->m2m_dev->m2m_ops->lock)
		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
						done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
			|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
						done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
			|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
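
/*
 * A minimal sketch of how a driver might wire this into its file operations.
 * The "mydrv_*" names and the ctx layout are hypothetical; a real driver
 * typically serializes against its own state, either with the lock/unlock
 * callbacks in v4l2_m2m_ops or with its own mutex.
 *
 *	static unsigned int mydrv_poll(struct file *file,
 *				       struct poll_table_struct *wait)
 *	{
 *		struct mydrv_ctx *ctx = file->private_data;
 *
 *		return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
 *	}
 */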

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from the driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for videobuf2, which will receive normal per-queue offsets and
 * proper vb2 queue pointers. The differentiation is made outside videobuf2 by
 * adding a predefined offset to buffers from one of the queues and subtracting
 * it before passing it back to videobuf2. Only drivers (and thus applications)
 * receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
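
/*
 * A minimal sketch of the corresponding file operation in a driver; the
 * "mydrv_*" names and the ctx layout are hypothetical.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv_ctx *ctx = file->private_data;
 *
 *		return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
 *	}
 */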

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from driver's probe() function.
 */
struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
			WARN_ON(!m2m_ops->job_abort))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
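
/*
 * A minimal sketch of the probe-time setup in a driver: fill a v4l2_m2m_ops
 * structure with the mandatory callbacks and create the per-device m2m state.
 * The "mydrv_*" callbacks and the dev layout are hypothetical; device_run()
 * and job_abort() are the callbacks this function actually requires.
 *
 *	static struct v4l2_m2m_ops mydrv_m2m_ops = {
 *		.device_run	= mydrv_device_run,
 *		.job_ready	= mydrv_job_ready,
 *		.job_abort	= mydrv_job_abort,
 *	};
 *
 *	static int mydrv_probe(struct platform_device *pdev)
 *	{
 *		...
 *		dev->m2m_dev = v4l2_m2m_init(&mydrv_m2m_ops);
 *		if (IS_ERR(dev->m2m_dev))
 *			return PTR_ERR(dev->m2m_dev);
 *		...
 *	}
 */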

/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * Usually called from driver's remove() function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 * @m2m_dev:	a previously initialized m2m_dev struct
 * @drv_priv:	driver's instance private data
 * @queue_init:	a callback for queue type-specific initialization function
 *		to be used for initializing vb2_queues
 *
 * Usually called from driver's open() function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

	if (ret)
		goto err;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
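
/*
 * A minimal sketch of the open-time setup in a driver: the queue_init()
 * callback fills in both vb2_queues, then v4l2_m2m_ctx_init() ties them to a
 * new m2m context. The "mydrv_*" names, the ctx layout and the vb2 ops/mem_ops
 * chosen here are hypothetical; note that buf_struct_size must be
 * sizeof(struct v4l2_m2m_buffer) so that v4l2_m2m_buf_queue() can embed the
 * buffer on the ready list.
 *
 *	static int mydrv_queue_init(void *priv, struct vb2_queue *src_vq,
 *				    struct vb2_queue *dst_vq)
 *	{
 *		struct mydrv_ctx *ctx = priv;
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		src_vq->io_modes = VB2_MMAP;
 *		src_vq->drv_priv = ctx;
 *		src_vq->ops = &mydrv_qops;
 *		src_vq->mem_ops = &vb2_dma_contig_memops;
 *		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		dst_vq->io_modes = VB2_MMAP;
 *		dst_vq->drv_priv = ctx;
 *		dst_vq->ops = &mydrv_qops;
 *		dst_vq->mem_ops = &vb2_dma_contig_memops;
 *		dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 *	static int mydrv_open(struct file *file)
 *	{
 *		...
 *		ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
 *						 mydrv_queue_init);
 *		if (IS_ERR(ctx->m2m_ctx))
 *			return PTR_ERR(ctx->m2m_ctx);
 *		...
 *	}
 */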

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * Call from buf_queue(), the vb2_ops callback.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
{
	struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
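
/*
 * A minimal sketch of the vb2 buf_queue callback in a driver: every buffer
 * queued by userspace is handed to the framework's ready list; scheduling is
 * then attempted from v4l2_m2m_qbuf()/v4l2_m2m_streamon(). The "mydrv_*"
 * names and the ctx layout are hypothetical; the queue's buf_struct_size must
 * be sizeof(struct v4l2_m2m_buffer) for the container_of() above to be valid.
 *
 *	static void mydrv_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct mydrv_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
 *	}
 */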