/*
 * videobuf2-core.c - V4L2 driver helper framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-common.h>
#include <media/videobuf2-core.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			pr_debug("vb2: %s: " fmt, __func__, ## arg);	\
	} while (0)

#ifdef CONFIG_VIDEO_ADV_DEBUG

/*
 * If advanced debugging is on, then count how often each op is called
 * successfully, which can either be per-buffer or per-queue.
 *
 * This makes it easy to check that the 'init' and 'cleanup'
 * (and variations thereof) stay balanced.
 */

#define log_memop(vb, op)						\
	dprintk(2, "call_memop(%p, %d, %s)%s\n",			\
		(vb)->vb2_queue, (vb)->v4l2_buf.index, #op,		\
		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)")

#define call_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	int err;							\
									\
	log_memop(vb, op);						\
	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0;		\
	if (!err)							\
		(vb)->cnt_mem_ ## op++;					\
	err;								\
})

#define call_ptr_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	void *ptr;							\
									\
	log_memop(vb, op);						\
	ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL;		\
	if (!IS_ERR_OR_NULL(ptr))					\
		(vb)->cnt_mem_ ## op++;					\
	ptr;								\
})

#define call_void_memop(vb, op, args...)				\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
									\
	log_memop(vb, op);						\
	if (_q->mem_ops->op)						\
		_q->mem_ops->op(args);					\
	(vb)->cnt_mem_ ## op++;						\
})

#define log_qop(q, op)							\
	dprintk(2, "call_qop(%p, %s)%s\n", q, #op,			\
		(q)->ops->op ? "" : " (nop)")

#define call_qop(q, op, args...)					\
({									\
	int err;							\
									\
	log_qop(q, op);							\
	err = (q)->ops->op ? (q)->ops->op(args) : 0;			\
	if (!err)							\
		(q)->cnt_ ## op++;					\
	err;								\
})

#define call_void_qop(q, op, args...)					\
({									\
	log_qop(q, op);							\
	if ((q)->ops->op)						\
		(q)->ops->op(args);					\
	(q)->cnt_ ## op++;						\
})

#define log_vb_qop(vb, op, args...)					\
	dprintk(2, "call_vb_qop(%p, %d, %s)%s\n",			\
		(vb)->vb2_queue, (vb)->v4l2_buf.index, #op,		\
		(vb)->vb2_queue->ops->op ? "" : " (nop)")

#define call_vb_qop(vb, op, args...)					\
({									\
	int err;							\
									\
	log_vb_qop(vb, op);						\
	err = (vb)->vb2_queue->ops->op ?				\
		(vb)->vb2_queue->ops->op(args) : 0;			\
	if (!err)							\
		(vb)->cnt_ ## op++;					\
	err;								\
})

#define call_void_vb_qop(vb, op, args...)				\
({									\
	log_vb_qop(vb, op);						\
	if ((vb)->vb2_queue->ops->op)					\
		(vb)->vb2_queue->ops->op(args);				\
	(vb)->cnt_ ## op++;						\
})

#else

#define call_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : 0)

#define call_ptr_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : NULL)

#define call_void_memop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->mem_ops->op)			\
			(vb)->vb2_queue->mem_ops->op(args);		\
	} while (0)

#define call_qop(q, op, args...)					\
	((q)->ops->op ? (q)->ops->op(args) : 0)

#define call_void_qop(q, op, args...)					\
	do {								\
		if ((q)->ops->op)					\
			(q)->ops->op(args);				\
	} while (0)

#define call_vb_qop(vb, op, args...)					\
	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)

#define call_void_vb_qop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->ops->op)				\
			(vb)->vb2_queue->ops->op(args);			\
	} while (0)

#endif

/* Flags that are set by the vb2 core */
#define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
				 V4L2_BUF_FLAG_PREPARED | \
				 V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver */
#define V4L2_BUFFER_OUT_FLAGS	(V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
				 V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)

static void __vb2_queue_cancel(struct vb2_queue *q);

/**
 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
 */
static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	int plane;

	/*
	 * Allocate memory for all planes in this buffer
	 * NOTE: mmapped areas should be page aligned
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);

		mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane],
					  size, q->gfp_flags);
		if (IS_ERR_OR_NULL(mem_priv))
			goto free;

		/* Associate allocator private data with this plane */
		vb->planes[plane].mem_priv = mem_priv;
		vb->v4l2_planes[plane].length = q->plane_sizes[plane];
	}

	return 0;
free:
	/* Free already allocated memory if one of the allocations failed */
	for (; plane > 0; --plane) {
		call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
		vb->planes[plane - 1].mem_priv = NULL;
	}

	return -ENOMEM;
}

/**
 * __vb2_buf_mem_free() - free memory of the given buffer
 */
static void __vb2_buf_mem_free(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		call_void_memop(vb, put, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		dprintk(3, "Freed plane %d of buffer %d\n", plane,
			vb->v4l2_buf.index);
	}
}

/**
 * __vb2_buf_userptr_put() - release userspace memory associated with
 * a USERPTR buffer
 */
static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
	}
}

/**
 * __vb2_plane_dmabuf_put() - release memory associated with
 * a DMABUF shared plane
 */
static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
{
	if (!p->mem_priv)
		return;

	if (p->dbuf_mapped)
		call_void_memop(vb, unmap_dmabuf, p->mem_priv);

	call_void_memop(vb, detach_dmabuf, p->mem_priv);
	dma_buf_put(p->dbuf);
	memset(p, 0, sizeof(*p));
}

/**
 * __vb2_buf_dmabuf_put() - release memory associated with
 * a DMABUF shared buffer
 */
static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane)
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
}

/**
 * __setup_lengths() - setup initial lengths for every plane in
 * every buffer on the queue
 */
static void __setup_lengths(struct vb2_queue *q, unsigned int n)
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;

	for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		for (plane = 0; plane < vb->num_planes; ++plane)
			vb->v4l2_planes[plane].length = q->plane_sizes[plane];
	}
}

/**
 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
 * every buffer on the queue
 */
static void __setup_offsets(struct vb2_queue *q, unsigned int n)
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;
	unsigned long off;

	if (q->num_buffers) {
		struct v4l2_plane *p;
		vb = q->bufs[q->num_buffers - 1];
		p = &vb->v4l2_planes[vb->num_planes - 1];
		off = PAGE_ALIGN(p->m.mem_offset + p->length);
	} else {
		off = 0;
	}

	for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		for (plane = 0; plane < vb->num_planes; ++plane) {
			vb->v4l2_planes[plane].m.mem_offset = off;

			dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
				buffer, plane, off);

			off += vb->v4l2_planes[plane].length;
			off = PAGE_ALIGN(off);
		}
	}
}
/**
 * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
 * video buffer memory for all buffers/planes on the queue and initialize the
 * queue
 *
 * Returns the number of buffers successfully allocated.
 */
static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
			     unsigned int num_buffers, unsigned int num_planes)
{
	unsigned int buffer;
	struct vb2_buffer *vb;
	int ret;

	for (buffer = 0; buffer < num_buffers; ++buffer) {
		/* Allocate videobuf buffer structures */
		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
		if (!vb) {
			dprintk(1, "Memory alloc for buffer struct failed\n");
			break;
		}

		/* Length stores number of planes for multiplanar buffers */
		if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
			vb->v4l2_buf.length = num_planes;

		vb->state = VB2_BUF_STATE_DEQUEUED;
		vb->vb2_queue = q;
		vb->num_planes = num_planes;
		vb->v4l2_buf.index = q->num_buffers + buffer;
		vb->v4l2_buf.type = q->type;
		vb->v4l2_buf.memory = memory;

		/* Allocate video buffer memory for the MMAP type */
		if (memory == V4L2_MEMORY_MMAP) {
			ret = __vb2_buf_mem_alloc(vb);
			if (ret) {
				dprintk(1, "Failed allocating memory for "
					"buffer %d\n", buffer);
				kfree(vb);
				break;
			}
			/*
			 * Call the driver-provided buffer initialization
			 * callback, if given. An error in initialization
			 * results in queue setup failure.
			 */
			ret = call_vb_qop(vb, buf_init, vb);
			if (ret) {
				dprintk(1, "Buffer %d %p initialization"
					" failed\n", buffer, vb);
				__vb2_buf_mem_free(vb);
				kfree(vb);
				break;
			}
		}

		q->bufs[q->num_buffers + buffer] = vb;
	}

	__setup_lengths(q, buffer);
	if (memory == V4L2_MEMORY_MMAP)
		__setup_offsets(q, buffer);

	dprintk(1, "Allocated %d buffers, %d plane(s) each\n",
		buffer, num_planes);

	return buffer;
}

/**
 * __vb2_free_mem() - release all video buffer memory for a given queue
 */
static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;
	struct vb2_buffer *vb;

	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		/* Free MMAP buffers or release USERPTR buffers */
		if (q->memory == V4L2_MEMORY_MMAP)
			__vb2_buf_mem_free(vb);
		else if (q->memory == V4L2_MEMORY_DMABUF)
			__vb2_buf_dmabuf_put(vb);
		else
			__vb2_buf_userptr_put(vb);
	}
}

/**
 * __vb2_queue_free() - free buffers at the end of the queue - video memory and
 * related information, if no buffers are left return the queue to an
 * uninitialized state. Might be called even if the queue has already been freed.
 */
static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;

	/*
	 * Sanity check: when preparing a buffer the queue lock is released for
	 * a short while (see __buf_prepare for the details), which would allow
	 * a race with a reqbufs which can call this function. Removing the
	 * buffers from underneath __buf_prepare is obviously a bad idea, so we
	 * check if any of the buffers is in the state PREPARING, and if so we
	 * just return -EAGAIN.
	 */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		if (q->bufs[buffer] == NULL)
			continue;
		if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
			dprintk(1, "preparing buffers, cannot free\n");
			return -EAGAIN;
		}
	}

	/* Call driver-provided cleanup function for each buffer, if provided */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];

		if (vb && vb->planes[0].mem_priv)
			call_void_vb_qop(vb, buf_cleanup, vb);
	}

	/* Release video buffer memory */
	__vb2_free_mem(q, buffers);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Check that all the calls were balanced during the life-time of this
	 * queue. If not (or if the debug level is 1 or up), then dump the
	 * counters to the kernel log.
	 */
	if (q->num_buffers) {
		bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
				  q->cnt_wait_prepare != q->cnt_wait_finish;

		if (unbalanced || debug) {
			pr_info("vb2: counters for queue %p:%s\n", q,
				unbalanced ? " UNBALANCED!" : "");
			pr_info("vb2: setup: %u start_streaming: %u stop_streaming: %u\n",
				q->cnt_queue_setup, q->cnt_start_streaming,
				q->cnt_stop_streaming);
			pr_info("vb2: wait_prepare: %u wait_finish: %u\n",
				q->cnt_wait_prepare, q->cnt_wait_finish);
		}
		q->cnt_queue_setup = 0;
		q->cnt_wait_prepare = 0;
		q->cnt_wait_finish = 0;
		q->cnt_start_streaming = 0;
		q->cnt_stop_streaming = 0;
	}
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];
		bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
				  vb->cnt_mem_prepare != vb->cnt_mem_finish ||
				  vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
				  vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
				  vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
				  vb->cnt_buf_queue != vb->cnt_buf_done ||
				  vb->cnt_buf_prepare != vb->cnt_buf_finish ||
				  vb->cnt_buf_init != vb->cnt_buf_cleanup;

		if (unbalanced || debug) {
			pr_info("vb2: counters for queue %p, buffer %d:%s\n",
				q, buffer, unbalanced ? " UNBALANCED!" : "");
			pr_info("vb2: buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
				vb->cnt_buf_init, vb->cnt_buf_cleanup,
				vb->cnt_buf_prepare, vb->cnt_buf_finish);
			pr_info("vb2: buf_queue: %u buf_done: %u\n",
				vb->cnt_buf_queue, vb->cnt_buf_done);
			pr_info("vb2: alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
				vb->cnt_mem_alloc, vb->cnt_mem_put,
				vb->cnt_mem_prepare, vb->cnt_mem_finish,
				vb->cnt_mem_mmap);
			pr_info("vb2: get_userptr: %u put_userptr: %u\n",
				vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
			pr_info("vb2: attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
				vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
				vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
			pr_info("vb2: get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
				vb->cnt_mem_get_dmabuf,
				vb->cnt_mem_num_users,
				vb->cnt_mem_vaddr,
				vb->cnt_mem_cookie);
		}
	}
#endif

	/* Free videobuf buffers */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		kfree(q->bufs[buffer]);
		q->bufs[buffer] = NULL;
	}

	q->num_buffers -= buffers;
	if (!q->num_buffers) {
		q->memory = 0;
		INIT_LIST_HEAD(&q->queued_list);
	}
	return 0;
}

/**
 * __verify_planes_array() - verify that the planes array passed in struct
 * v4l2_buffer from userspace can be safely used
 */
static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
		return 0;

	/* Is memory for copying plane information present? */
	if (NULL == b->m.planes) {
		dprintk(1, "Multi-planar buffer passed but "
			"planes array not provided\n");
		return -EINVAL;
	}

	if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) {
		dprintk(1, "Incorrect planes array length, "
			"expected %d, got %d\n", vb->num_planes, b->length);
		return -EINVAL;
	}

	return 0;
}

/**
 * __verify_length() - Verify that the bytesused value for each plane fits in
 * the plane length and that the data offset doesn't exceed the bytesused value.
 */
static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	unsigned int length;
	unsigned int plane;

	if (!V4L2_TYPE_IS_OUTPUT(b->type))
		return 0;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		for (plane = 0; plane < vb->num_planes; ++plane) {
			length = (b->memory == V4L2_MEMORY_USERPTR)
			       ? b->m.planes[plane].length
			       : vb->v4l2_planes[plane].length;

			if (b->m.planes[plane].bytesused > length)
				return -EINVAL;

			if (b->m.planes[plane].data_offset > 0 &&
			    b->m.planes[plane].data_offset >=
			    b->m.planes[plane].bytesused)
				return -EINVAL;
		}
	} else {
		length = (b->memory == V4L2_MEMORY_USERPTR)
			? b->length : vb->v4l2_planes[0].length;

		if (b->bytesused > length)
			return -EINVAL;
	}

	return 0;
}

/**
 * __buffer_in_use() - return true if the buffer is in use and
 * the queue cannot be freed (by means of a REQBUFS(0) call)
 */
static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
{
	unsigned int plane;
	for (plane = 0; plane < vb->num_planes; ++plane) {
		void *mem_priv = vb->planes[plane].mem_priv;
		/*
		 * If num_users() has not been provided, call_memop
		 * will return 0, apparently nobody cares about this
		 * case anyway. If num_users() returns more than 1,
		 * we are not the only user of the plane's memory.
		 */
		if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
			return true;
	}
	return false;
}

/**
 * __buffers_in_use() - return true if any buffers on the queue are in use and
 * the queue cannot be freed (by means of a REQBUFS(0) call)
 */
static bool __buffers_in_use(struct vb2_queue *q)
{
	unsigned int buffer;
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		if (__buffer_in_use(q, q->bufs[buffer]))
			return true;
	}
	return false;
}

/**
 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
 * returned to userspace
 */
static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;

	/* Copy back data such as timestamp, flags, etc. */
	memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
	b->reserved2 = vb->v4l2_buf.reserved2;
	b->reserved = vb->v4l2_buf.reserved;

	if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
		/*
		 * Fill in plane-related data if userspace provided an array
		 * for it. The caller has already verified memory and size.
		 */
		b->length = vb->num_planes;
		memcpy(b->m.planes, vb->v4l2_planes,
			b->length * sizeof(struct v4l2_plane));
	} else {
		/*
		 * We use length and offset in v4l2_planes array even for
		 * single-planar buffers, but userspace does not.
		 */
		b->length = vb->v4l2_planes[0].length;
		b->bytesused = vb->v4l2_planes[0].bytesused;
		if (q->memory == V4L2_MEMORY_MMAP)
			b->m.offset = vb->v4l2_planes[0].m.mem_offset;
		else if (q->memory == V4L2_MEMORY_USERPTR)
			b->m.userptr = vb->v4l2_planes[0].m.userptr;
		else if (q->memory == V4L2_MEMORY_DMABUF)
			b->m.fd = vb->v4l2_planes[0].m.fd;
	}

	/*
	 * Clear any buffer state related flags.
	 */
	b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
	b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
	if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
	    V4L2_BUF_FLAG_TIMESTAMP_COPY) {
		/*
		 * For non-COPY timestamps, drop timestamp source bits
		 * and obtain the timestamp source from the queue.
		 */
		b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	switch (vb->state) {
	case VB2_BUF_STATE_QUEUED:
	case VB2_BUF_STATE_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VB2_BUF_STATE_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		/* fall through */
	case VB2_BUF_STATE_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VB2_BUF_STATE_PREPARED:
		b->flags |= V4L2_BUF_FLAG_PREPARED;
		break;
	case VB2_BUF_STATE_PREPARING:
	case VB2_BUF_STATE_DEQUEUED:
		/* nothing */
		break;
	}

	if (__buffer_in_use(q, vb))
		b->flags |= V4L2_BUF_FLAG_MAPPED;
}

/**
 * vb2_querybuf() - query video buffer information
 * @q:		videobuf queue
 * @b:		buffer struct passed from userspace to vidioc_querybuf handler
 *		in driver
 *
 * Should be called from vidioc_querybuf ioctl handler in driver.
 * This function will verify the passed v4l2_buffer structure and fill the
 * relevant information for the userspace.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_querybuf handler in driver.
 */
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(1, "wrong buffer type\n");
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(1, "buffer index out of range\n");
		return -EINVAL;
	}
	vb = q->bufs[b->index];
	ret = __verify_planes_array(vb, b);
	if (!ret)
		__fill_v4l2_buffer(vb, b);
	return ret;
}
EXPORT_SYMBOL(vb2_querybuf);
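
/*
 * Usage sketch (not part of the core): a driver's vidioc_querybuf handler
 * normally just forwards to vb2_querybuf(). "struct foo_dev" and foo_querybuf()
 * are hypothetical names, assuming the driver stores its vb2_queue in the
 * private structure returned by video_drvdata().
 *
 *	static int foo_querybuf(struct file *file, void *priv,
 *				struct v4l2_buffer *b)
 *	{
 *		struct foo_dev *dev = video_drvdata(file);
 *
 *		return vb2_querybuf(&dev->queue, b);
 *	}
 */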

/**
 * __verify_userptr_ops() - verify that all memory operations required for
 * USERPTR queue type have been provided
 */
static int __verify_userptr_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
	    !q->mem_ops->put_userptr)
		return -EINVAL;

	return 0;
}

/**
 * __verify_mmap_ops() - verify that all memory operations required for
 * MMAP queue type have been provided
 */
static int __verify_mmap_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
	    !q->mem_ops->put || !q->mem_ops->mmap)
		return -EINVAL;

	return 0;
}

/**
 * __verify_dmabuf_ops() - verify that all memory operations required for
 * DMABUF queue type have been provided
 */
static int __verify_dmabuf_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
	    !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
	    !q->mem_ops->unmap_dmabuf)
		return -EINVAL;

	return 0;
}

/**
 * __verify_memory_type() - Check whether the memory type and buffer type
 * passed to a buffer operation are compatible with the queue.
 */
static int __verify_memory_type(struct vb2_queue *q,
		enum v4l2_memory memory, enum v4l2_buf_type type)
{
	if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
	    memory != V4L2_MEMORY_DMABUF) {
		dprintk(1, "unsupported memory type\n");
		return -EINVAL;
	}

	if (type != q->type) {
		dprintk(1, "requested type is incorrect\n");
		return -EINVAL;
	}

	/*
	 * Make sure all the required memory ops for given memory type
	 * are available.
	 */
	if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
		dprintk(1, "MMAP for current setup unsupported\n");
		return -EINVAL;
	}

	if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
		dprintk(1, "USERPTR for current setup unsupported\n");
		return -EINVAL;
	}

	if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
		dprintk(1, "DMABUF for current setup unsupported\n");
		return -EINVAL;
	}

	/*
	 * Place the busy tests at the end: -EBUSY can be ignored when
	 * create_bufs is called with count == 0, but count == 0 should still
	 * do the memory and type validation.
	 */
	if (vb2_fileio_is_active(q)) {
		dprintk(1, "file io in progress\n");
		return -EBUSY;
	}
	return 0;
}

/**
 * __reqbufs() - Initiate streaming
 * @q:		videobuf2 queue
 * @req:	struct passed from userspace to vidioc_reqbufs handler in driver
 *
 * Should be called from vidioc_reqbufs ioctl handler of a driver.
 * This function:
 * 1) verifies streaming parameters passed from the userspace,
 * 2) sets up the queue,
 * 3) negotiates number of buffers and planes per buffer with the driver
 *    to be used during streaming,
 * 4) allocates internal buffer structures (struct vb2_buffer), according to
 *    the agreed parameters,
 * 5) for MMAP memory type, allocates actual video memory, using the
 *    memory handling/allocation routines provided during queue initialization
 *
 * If req->count is 0, all the memory will be freed instead.
 * If the queue has been allocated previously (by a previous vb2_reqbufs call)
 * and the queue is not busy, memory will be reallocated.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_reqbufs handler in driver.
 */
static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
	unsigned int num_buffers, allocated_buffers, num_planes = 0;
	int ret;

	if (q->streaming) {
		dprintk(1, "streaming active\n");
		return -EBUSY;
	}

	if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
		/*
		 * We already have buffers allocated, so first check if they
		 * are not in use and can be freed.
		 */
		if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
			dprintk(1, "memory in use, cannot free\n");
			return -EBUSY;
		}

		/*
		 * Call queue_cancel to clean up any buffers in the PREPARED or
		 * QUEUED state which is possible if buffers were prepared or
		 * queued without ever calling STREAMON.
		 */
		__vb2_queue_cancel(q);
		ret = __vb2_queue_free(q, q->num_buffers);
		if (ret)
			return ret;

		/*
		 * In case of REQBUFS(0) return immediately without calling
		 * driver's queue_setup() callback and allocating resources.
		 */
		if (req->count == 0)
			return 0;
	}

	/*
	 * Make sure the requested values and current defaults are sane.
	 */
	num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
	num_buffers = max_t(unsigned int, num_buffers, q->min_buffers_needed);
	memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
	memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
	q->memory = req->memory;

	/*
	 * Ask the driver how many buffers and planes per buffer it requires.
	 * Driver also sets the size and allocator context for each plane.
	 */
	ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
		       q->plane_sizes, q->alloc_ctx);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	allocated_buffers = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
	if (allocated_buffers == 0) {
		dprintk(1, "Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * There is no point in continuing if we can't allocate the minimum
	 * number of buffers needed by this vb2_queue.
	 */
	if (allocated_buffers < q->min_buffers_needed)
		ret = -ENOMEM;

	/*
	 * Check if driver can handle the allocated number of buffers.
	 */
	if (!ret && allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
			       &num_planes, q->plane_sizes, q->alloc_ctx);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	q->num_buffers = allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 */
		__vb2_queue_free(q, allocated_buffers);
		return ret;
	}

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	req->count = allocated_buffers;

	return 0;
}

/**
 * vb2_reqbufs() - Wrapper for __reqbufs() that also verifies the memory and
 * type values.
 * @q:		videobuf2 queue
 * @req:	struct passed from userspace to vidioc_reqbufs handler in driver
 */
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
	int ret = __verify_memory_type(q, req->memory, req->type);

	return ret ? ret : __reqbufs(q, req);
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);
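
/*
 * Usage sketch (hypothetical driver code, not part of the core): vb2_reqbufs()
 * is called directly from the driver's vidioc_reqbufs handler, and the buffer
 * count/plane size negotiation described above happens in the driver's
 * queue_setup op. foo_dev, foo_queue_setup(), foo_reqbufs() and dev->sizeimage
 * are assumed names; a real driver derives the plane size from its current
 * format.
 *
 *	static int foo_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
 *				   unsigned int *nbuffers, unsigned int *nplanes,
 *				   unsigned int sizes[], void *alloc_ctxs[])
 *	{
 *		struct foo_dev *dev = vb2_get_drv_priv(q);
 *
 *		*nplanes = 1;
 *		sizes[0] = dev->sizeimage;
 *		return 0;
 *	}
 *
 *	static int foo_reqbufs(struct file *file, void *priv,
 *			       struct v4l2_requestbuffers *req)
 *	{
 *		struct foo_dev *dev = video_drvdata(file);
 *
 *		return vb2_reqbufs(&dev->queue, req);
 *	}
 */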

/**
 * __create_bufs() - Allocate buffers and any required auxiliary structs
 * @q:		videobuf2 queue
 * @create:	creation parameters, passed from userspace to vidioc_create_bufs
 *		handler in driver
 *
 * Should be called from vidioc_create_bufs ioctl handler of a driver.
 * This function:
 * 1) verifies parameter sanity
 * 2) calls the .queue_setup() queue operation
 * 3) performs any necessary memory allocations
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_create_bufs handler in driver.
 */
static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
	unsigned int num_planes = 0, num_buffers, allocated_buffers;
	int ret;

	if (q->num_buffers == VIDEO_MAX_FRAME) {
		dprintk(1, "maximum number of buffers already allocated\n");
		return -ENOBUFS;
	}

	if (!q->num_buffers) {
		memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
		memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
		q->memory = create->memory;
	}

	num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);

	/*
	 * Ask the driver, whether the requested number of buffers, planes per
	 * buffer and their sizes are acceptable
	 */
	ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
		       &num_planes, q->plane_sizes, q->alloc_ctx);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	allocated_buffers = __vb2_queue_alloc(q, create->memory, num_buffers,
				num_planes);
	if (allocated_buffers == 0) {
		dprintk(1, "Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * Check if driver can handle the so far allocated number of buffers.
	 */
	if (allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		/*
		 * q->num_buffers contains the total number of buffers that the
		 * queue driver has set up
		 */
		ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
			       &num_planes, q->plane_sizes, q->alloc_ctx);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	q->num_buffers += allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 */
		__vb2_queue_free(q, allocated_buffers);
		return -ENOMEM;
	}

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	create->count = allocated_buffers;

	return 0;
}

/**
 * vb2_create_bufs() - Wrapper for __create_bufs() that also verifies the
 * memory and type values.
 * @q:		videobuf2 queue
 * @create:	creation parameters, passed from userspace to vidioc_create_bufs
 *		handler in driver
 */
int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
	int ret = __verify_memory_type(q, create->memory, create->format.type);

	create->index = q->num_buffers;
	if (create->count == 0)
		return ret != -EBUSY ? ret : 0;
	return ret ? ret : __create_bufs(q, create);
}
EXPORT_SYMBOL_GPL(vb2_create_bufs);
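
/*
 * Usage sketch (hypothetical driver code): as with vb2_reqbufs(), the driver's
 * vidioc_create_bufs handler usually just forwards to vb2_create_bufs(); any
 * additional constraints are applied in the same queue_setup op shown above.
 * foo_dev and foo_create_bufs() are assumed names.
 *
 *	static int foo_create_bufs(struct file *file, void *priv,
 *				   struct v4l2_create_buffers *create)
 *	{
 *		struct foo_dev *dev = video_drvdata(file);
 *
 *		return vb2_create_bufs(&dev->queue, create);
 *	}
 */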

/**
 * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
 * @vb:		vb2_buffer to which the plane in question belongs
 * @plane_no:	plane number for which the address is to be returned
 *
 * This function returns a kernel virtual address of a given plane if
 * such a mapping exists, NULL otherwise.
 */
void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
{
	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
		return NULL;

	return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);

}
EXPORT_SYMBOL_GPL(vb2_plane_vaddr);

/**
 * vb2_plane_cookie() - Return allocator specific cookie for the given plane
 * @vb:		vb2_buffer to which the plane in question belongs
 * @plane_no:	plane number for which the cookie is to be returned
 *
 * This function returns an allocator specific cookie for a given plane if
 * available, NULL otherwise. The allocator should provide some simple static
 * inline function, which would convert this cookie to the allocator specific
 * type that can be used directly by the driver to access the buffer. This can
 * be for example physical address, pointer to scatter list or IOMMU mapping.
 */
void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
{
	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
		return NULL;

	return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_cookie);
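
/*
 * Usage sketch (hypothetical driver code): a driver that needs CPU access to a
 * plane, or the allocator cookie (for vb2-dma-contig the cookie points to a
 * dma_addr_t), would typically do something like the following when programming
 * the hardware. foo_hw_set_dma_addr() and dev are assumed names.
 *
 *	void *vaddr = vb2_plane_vaddr(vb, 0);
 *	dma_addr_t *dma = vb2_plane_cookie(vb, 0);
 *
 *	if (vaddr)
 *		memset(vaddr, 0, vb2_plane_size(vb, 0));
 *	if (dma)
 *		foo_hw_set_dma_addr(dev, *dma);
 */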

/**
 * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
 * @vb:		vb2_buffer returned from the driver
 * @state:	either VB2_BUF_STATE_DONE if the operation finished successfully
 *		or VB2_BUF_STATE_ERROR if the operation finished with an error.
 *		If start_streaming fails then it should return buffers with state
 *		VB2_BUF_STATE_QUEUED to put them back into the queue.
 *
 * This function should be called by the driver after a hardware operation on
 * a buffer is finished and the buffer may be returned to userspace. The driver
 * cannot use this buffer anymore until it is queued back to it by videobuf
 * by means of the buf_queue callback. Only buffers previously queued to the
 * driver by buf_queue can be passed to this function.
 *
 * While streaming a buffer can only be returned in state DONE or ERROR.
 * The start_streaming op can also return them in case the DMA engine cannot
 * be started for some reason. In that case the buffers should be returned with
 * state QUEUED.
 */
void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned long flags;
	unsigned int plane;

	if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
		return;

	if (!q->start_streaming_called) {
		if (WARN_ON(state != VB2_BUF_STATE_QUEUED))
			state = VB2_BUF_STATE_QUEUED;
	} else if (WARN_ON(state != VB2_BUF_STATE_DONE &&
			   state != VB2_BUF_STATE_ERROR)) {
		state = VB2_BUF_STATE_ERROR;
	}

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Although this is not a callback, it still does have to balance
	 * with the buf_queue op. So update this counter manually.
	 */
	vb->cnt_buf_done++;
#endif
	dprintk(4, "Done processing on buffer %d, state: %d\n",
		vb->v4l2_buf.index, state);

	/* sync buffers */
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_void_memop(vb, finish, vb->planes[plane].mem_priv);

	/* Add the buffer to the done buffers list */
	spin_lock_irqsave(&q->done_lock, flags);
	vb->state = state;
	if (state != VB2_BUF_STATE_QUEUED)
		list_add_tail(&vb->done_entry, &q->done_list);
	atomic_dec(&q->owned_by_drv_count);
	spin_unlock_irqrestore(&q->done_lock, flags);

	if (state == VB2_BUF_STATE_QUEUED)
		return;

	/* Inform any processes that may be waiting for buffers */
	wake_up(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
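
/*
 * Usage sketch (hypothetical driver code): completing the currently active
 * buffer from an interrupt handler. foo_irq(), dev->curr_buf and dev->sizeimage
 * are assumed driver-side names; the payload would normally come from the
 * hardware.
 *
 *	static irqreturn_t foo_irq(int irq, void *priv)
 *	{
 *		struct foo_dev *dev = priv;
 *		struct vb2_buffer *vb = dev->curr_buf;
 *
 *		vb2_set_plane_payload(vb, 0, dev->sizeimage);
 *		v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
 *		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
 *		return IRQ_HANDLED;
 *	}
 */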

/**
 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
 * v4l2_buffer by the userspace. The caller has already verified that struct
 * v4l2_buffer has a valid number of planes.
 */
static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
				struct v4l2_plane *v4l2_planes)
{
	unsigned int plane;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		/* Fill in driver-provided information for OUTPUT types */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			bool bytesused_is_used;

			/* Check if bytesused == 0 for all planes */
			for (plane = 0; plane < vb->num_planes; ++plane)
				if (b->m.planes[plane].bytesused)
					break;
			bytesused_is_used = plane < vb->num_planes;

			/*
			 * Will have to go up to b->length when API starts
			 * accepting variable number of planes.
			 *
			 * If bytesused_is_used is false, then fall back to the
			 * full buffer size. In that case userspace clearly
			 * never bothered to set it and it's a safe assumption
			 * that they really meant to use the full plane sizes.
			 */
			for (plane = 0; plane < vb->num_planes; ++plane) {
				struct v4l2_plane *pdst = &v4l2_planes[plane];
				struct v4l2_plane *psrc = &b->m.planes[plane];

				pdst->bytesused = bytesused_is_used ?
					psrc->bytesused : psrc->length;
				pdst->data_offset = psrc->data_offset;
			}
		}

		if (b->memory == V4L2_MEMORY_USERPTR) {
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].m.userptr =
					b->m.planes[plane].m.userptr;
				v4l2_planes[plane].length =
					b->m.planes[plane].length;
			}
		}
		if (b->memory == V4L2_MEMORY_DMABUF) {
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].m.fd =
					b->m.planes[plane].m.fd;
				v4l2_planes[plane].length =
					b->m.planes[plane].length;
			}
		}
	} else {
		/*
		 * Single-planar buffers do not use planes array,
		 * so fill in relevant v4l2_buffer struct fields instead.
		 * In videobuf we use our internal V4l2_planes struct for
		 * single-planar buffers as well, for simplicity.
		 *
		 * If bytesused == 0, then fall back to the full buffer size
		 * as that's a sensible default.
		 */
		if (V4L2_TYPE_IS_OUTPUT(b->type))
			v4l2_planes[0].bytesused =
				b->bytesused ? b->bytesused : b->length;
		else
			v4l2_planes[0].bytesused = 0;

		if (b->memory == V4L2_MEMORY_USERPTR) {
			v4l2_planes[0].m.userptr = b->m.userptr;
			v4l2_planes[0].length = b->length;
		}

		if (b->memory == V4L2_MEMORY_DMABUF) {
			v4l2_planes[0].m.fd = b->m.fd;
			v4l2_planes[0].length = b->length;
		}
	}

	/* Zero flags that the vb2 core handles */
	vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
	if ((vb->vb2_queue->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
	    V4L2_BUF_FLAG_TIMESTAMP_COPY || !V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * Non-COPY timestamps and non-OUTPUT queues will get
		 * their timestamp and timestamp source flags from the
		 * queue.
		 */
		vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	if (V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * For output buffers mask out the timecode flag:
		 * this will be handled later in vb2_internal_qbuf().
		 * The 'field' is valid metadata for this output buffer
		 * and so that needs to be copied here.
		 */
		vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TIMECODE;
		vb->v4l2_buf.field = b->field;
	} else {
		/* Zero any output buffer flags as this is a capture buffer */
		vb->v4l2_buf.flags &= ~V4L2_BUFFER_OUT_FLAGS;
	}
}

/**
 * __qbuf_mmap() - handle qbuf of an MMAP buffer
 */
static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	__fill_vb2_buffer(vb, b, vb->v4l2_planes);
	return call_vb_qop(vb, buf_prepare, vb);
}

/**
 * __qbuf_userptr() - handle qbuf of a USERPTR buffer
 */
static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	struct v4l2_plane planes[VIDEO_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret;
	int write = !V4L2_TYPE_IS_OUTPUT(q->type);
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	__fill_vb2_buffer(vb, b, planes);

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if already verified */
		if (vb->v4l2_planes[plane].m.userptr &&
		    vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
		    && vb->v4l2_planes[plane].length == planes[plane].length)
			continue;

		dprintk(3, "userspace address for plane %d changed, "
			"reacquiring memory\n", plane);

		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < q->plane_sizes[plane]) {
			dprintk(1, "provided buffer size %u is less than "
				"setup size %u for plane %d\n",
				planes[plane].length,
				q->plane_sizes[plane], plane);
			ret = -EINVAL;
			goto err;
		}

		/* Release previously acquired memory if present */
		if (vb->planes[plane].mem_priv) {
			if (!reacquired) {
				reacquired = true;
				call_void_vb_qop(vb, buf_cleanup, vb);
			}
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		}

		vb->planes[plane].mem_priv = NULL;
		memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],
					  planes[plane].m.userptr,
					  planes[plane].length, write);
		if (IS_ERR_OR_NULL(mem_priv)) {
			dprintk(1, "failed acquiring userspace "
				"memory for plane %d\n", plane);
			ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
			goto err;
		}
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane)
		vb->v4l2_planes[plane] = planes[plane];

	if (reacquired) {
		/*
		 * One or more planes changed, so we must call buf_init to do
		 * the driver-specific initialization on the newly acquired
		 * buffer, if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(1, "buffer preparation failed\n");
Hans Verkuila1d36d82014-03-17 09:54:21 -03001401 call_void_vb_qop(vb, buf_cleanup, vb);
Hans Verkuil256f3162014-01-29 13:36:53 -03001402 goto err;
1403 }
1404
Pawel Osciake23ccc02010-10-11 10:56:41 -03001405 return 0;
1406err:
1407 /* In case of errors, release planes that were already acquired */
Marek Szyprowskic1426bc2011-08-24 06:36:26 -03001408 for (plane = 0; plane < vb->num_planes; ++plane) {
1409 if (vb->planes[plane].mem_priv)
Hans Verkuila1d36d82014-03-17 09:54:21 -03001410 call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
Marek Szyprowskic1426bc2011-08-24 06:36:26 -03001411 vb->planes[plane].mem_priv = NULL;
1412 vb->v4l2_planes[plane].m.userptr = 0;
1413 vb->v4l2_planes[plane].length = 0;
Pawel Osciake23ccc02010-10-11 10:56:41 -03001414 }
1415
1416 return ret;
1417}
1418
1419/**
Sumit Semwalc5384042012-06-14 10:37:37 -03001420 * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
1421 */
1422static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1423{
1424 struct v4l2_plane planes[VIDEO_MAX_PLANES];
1425 struct vb2_queue *q = vb->vb2_queue;
1426 void *mem_priv;
1427 unsigned int plane;
1428 int ret;
1429 int write = !V4L2_TYPE_IS_OUTPUT(q->type);
Hans Verkuil256f3162014-01-29 13:36:53 -03001430 bool reacquired = vb->planes[0].mem_priv == NULL;
Sumit Semwalc5384042012-06-14 10:37:37 -03001431
Hans Verkuil412376a2014-04-07 08:44:56 -03001432 memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
Laurent Pinchart6f546c52014-01-01 09:10:48 -03001433 /* Copy relevant information provided by the userspace */
Sumit Semwalc5384042012-06-14 10:37:37 -03001434 __fill_vb2_buffer(vb, b, planes);
1435
1436 for (plane = 0; plane < vb->num_planes; ++plane) {
1437 struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
1438
1439 if (IS_ERR_OR_NULL(dbuf)) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001440 dprintk(1, "invalid dmabuf fd for plane %d\n",
Sumit Semwalc5384042012-06-14 10:37:37 -03001441 plane);
1442 ret = -EINVAL;
1443 goto err;
1444 }
1445
1446 /* use DMABUF size if length is not provided */
1447 if (planes[plane].length == 0)
1448 planes[plane].length = dbuf->size;
1449
Hans Verkuil412376a2014-04-07 08:44:56 -03001450 if (planes[plane].length < q->plane_sizes[plane]) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001451 dprintk(1, "invalid dmabuf length for plane %d\n",
Seung-Woo Kim77c07822013-11-29 04:50:29 -03001452 plane);
Sumit Semwalc5384042012-06-14 10:37:37 -03001453 ret = -EINVAL;
1454 goto err;
1455 }
1456
1457 /* Skip the plane if already verified */
1458 if (dbuf == vb->planes[plane].dbuf &&
1459 vb->v4l2_planes[plane].length == planes[plane].length) {
1460 dma_buf_put(dbuf);
1461 continue;
1462 }
1463
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001464 dprintk(1, "buffer for plane %d changed\n", plane);
Sumit Semwalc5384042012-06-14 10:37:37 -03001465
Hans Verkuil256f3162014-01-29 13:36:53 -03001466 if (!reacquired) {
1467 reacquired = true;
Hans Verkuila1d36d82014-03-17 09:54:21 -03001468 call_void_vb_qop(vb, buf_cleanup, vb);
Hans Verkuil256f3162014-01-29 13:36:53 -03001469 }
1470
Sumit Semwalc5384042012-06-14 10:37:37 -03001471 /* Release previously acquired memory if present */
Hans Verkuilb5b45412014-01-29 11:53:25 -03001472 __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
Sumit Semwalc5384042012-06-14 10:37:37 -03001473 memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
1474
1475 /* Acquire each plane's memory */
Hans Verkuila1d36d82014-03-17 09:54:21 -03001476 mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane],
Sumit Semwalc5384042012-06-14 10:37:37 -03001477 dbuf, planes[plane].length, write);
1478 if (IS_ERR(mem_priv)) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001479 dprintk(1, "failed to attach dmabuf\n");
Sumit Semwalc5384042012-06-14 10:37:37 -03001480 ret = PTR_ERR(mem_priv);
1481 dma_buf_put(dbuf);
1482 goto err;
1483 }
1484
1485 vb->planes[plane].dbuf = dbuf;
1486 vb->planes[plane].mem_priv = mem_priv;
1487 }
1488
1489 /* TODO: This pins the buffer(s) with dma_buf_map_attachment(), but
1490 * really we want to do this just before the DMA, not while queueing
1491 * the buffer(s).
1492 */
1493 for (plane = 0; plane < vb->num_planes; ++plane) {
Hans Verkuilb5b45412014-01-29 11:53:25 -03001494 ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
Sumit Semwalc5384042012-06-14 10:37:37 -03001495 if (ret) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001496 dprintk(1, "failed to map dmabuf for plane %d\n",
Sumit Semwalc5384042012-06-14 10:37:37 -03001497 plane);
1498 goto err;
1499 }
1500 vb->planes[plane].dbuf_mapped = 1;
1501 }
1502
1503 /*
Sumit Semwalc5384042012-06-14 10:37:37 -03001504 * Now that everything is in order, copy relevant information
1505 * provided by userspace.
1506 */
1507 for (plane = 0; plane < vb->num_planes; ++plane)
1508 vb->v4l2_planes[plane] = planes[plane];
1509
Hans Verkuil256f3162014-01-29 13:36:53 -03001510 if (reacquired) {
1511 /*
1512 * Call driver-specific initialization on the newly acquired buffer,
1513 * if provided.
1514 */
1515 ret = call_vb_qop(vb, buf_init, vb);
1516 if (ret) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001517 dprintk(1, "buffer initialization failed\n");
Hans Verkuil256f3162014-01-29 13:36:53 -03001518 goto err;
1519 }
1520 }
1521
1522 ret = call_vb_qop(vb, buf_prepare, vb);
1523 if (ret) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001524 dprintk(1, "buffer preparation failed\n");
Hans Verkuila1d36d82014-03-17 09:54:21 -03001525 call_void_vb_qop(vb, buf_cleanup, vb);
Hans Verkuil256f3162014-01-29 13:36:53 -03001526 goto err;
1527 }
1528
Sumit Semwalc5384042012-06-14 10:37:37 -03001529 return 0;
1530err:
1531 /* In case of errors, release planes that were already acquired */
1532 __vb2_buf_dmabuf_put(vb);
1533
1534 return ret;
1535}
1536
1537/**
Pawel Osciake23ccc02010-10-11 10:56:41 -03001538 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
1539 */
1540static void __enqueue_in_driver(struct vb2_buffer *vb)
1541{
1542 struct vb2_queue *q = vb->vb2_queue;
Marek Szyprowski3e0c2f22012-06-14 10:37:43 -03001543 unsigned int plane;
Pawel Osciake23ccc02010-10-11 10:56:41 -03001544
1545 vb->state = VB2_BUF_STATE_ACTIVE;
Hans Verkuil6ea3b982014-02-06 05:46:11 -03001546 atomic_inc(&q->owned_by_drv_count);
Marek Szyprowski3e0c2f22012-06-14 10:37:43 -03001547
1548 /* sync buffers */
1549 for (plane = 0; plane < vb->num_planes; ++plane)
Hans Verkuila1d36d82014-03-17 09:54:21 -03001550 call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
Marek Szyprowski3e0c2f22012-06-14 10:37:43 -03001551
Hans Verkuila1d36d82014-03-17 09:54:21 -03001552 call_void_vb_qop(vb, buf_queue, vb);
Pawel Osciake23ccc02010-10-11 10:56:41 -03001553}
1554
Guennadi Liakhovetski2d864012011-09-28 09:23:02 -03001555static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
Guennadi Liakhovetskiebc087d2011-08-31 06:51:10 -03001556{
1557 struct vb2_queue *q = vb->vb2_queue;
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001558 struct rw_semaphore *mmap_sem;
Guennadi Liakhovetskiebc087d2011-08-31 06:51:10 -03001559 int ret;
1560
Laurent Pinchart8023ed02012-07-10 10:41:40 -03001561 ret = __verify_length(vb, b);
Sylwester Nawrocki3a9621b2013-08-26 11:47:53 -03001562 if (ret < 0) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001563 dprintk(1, "plane parameters verification failed: %d\n", ret);
Laurent Pinchart8023ed02012-07-10 10:41:40 -03001564 return ret;
Sylwester Nawrocki3a9621b2013-08-26 11:47:53 -03001565 }
Hans Verkuile35e41b2014-04-07 09:20:39 -03001566 if (b->field == V4L2_FIELD_ALTERNATE && V4L2_TYPE_IS_OUTPUT(q->type)) {
1567 /*
1568 * If the format's field is ALTERNATE, then the buffer's field
1569 * should be either TOP or BOTTOM, not ALTERNATE since that
1570 * makes no sense. The driver has to know whether the
1571 * buffer represents a top or a bottom field in order to
1572 * program any DMA correctly. Using ALTERNATE is wrong, since
1573 * that just says that it is either a top or a bottom field,
1574 * but not which of the two it is.
1575 */
1576 dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
1577 return -EINVAL;
1578 }
Laurent Pinchart8023ed02012-07-10 10:41:40 -03001579
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001580 vb->state = VB2_BUF_STATE_PREPARING;
Hans Verkuilf1343282014-02-24 14:44:50 -03001581 vb->v4l2_buf.timestamp.tv_sec = 0;
1582 vb->v4l2_buf.timestamp.tv_usec = 0;
1583 vb->v4l2_buf.sequence = 0;
1584
Guennadi Liakhovetskiebc087d2011-08-31 06:51:10 -03001585 switch (q->memory) {
1586 case V4L2_MEMORY_MMAP:
1587 ret = __qbuf_mmap(vb, b);
1588 break;
1589 case V4L2_MEMORY_USERPTR:
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001590 /*
Mauro Carvalho Chehabf103b5d2014-01-07 07:03:09 -02001591 * In case of user pointer buffers vb2 allocators need to get
1592 * direct access to userspace pages. This requires getting
1593 * the mmap semaphore for read access in the current process
1594 * structure. The same semaphore is taken before calling mmap
1595 * operation, while both qbuf/prepare_buf and mmap are called
1596 * by the driver or v4l2 core with the driver's lock held.
1597 * To avoid an AB-BA deadlock (mmap_sem then driver's lock in
1598 * mmap and driver's lock then mmap_sem in qbuf/prepare_buf),
1599 * the videobuf2 core releases the driver's lock, takes
1600 * mmap_sem and then takes the driver's lock again.
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001601 */
1602 mmap_sem = &current->mm->mmap_sem;
Hans Verkuila1d36d82014-03-17 09:54:21 -03001603 call_void_qop(q, wait_prepare, q);
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001604 down_read(mmap_sem);
Hans Verkuila1d36d82014-03-17 09:54:21 -03001605 call_void_qop(q, wait_finish, q);
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001606
Guennadi Liakhovetskiebc087d2011-08-31 06:51:10 -03001607 ret = __qbuf_userptr(vb, b);
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001608
1609 up_read(mmap_sem);
Guennadi Liakhovetskiebc087d2011-08-31 06:51:10 -03001610 break;
Sumit Semwalc5384042012-06-14 10:37:37 -03001611 case V4L2_MEMORY_DMABUF:
1612 ret = __qbuf_dmabuf(vb, b);
1613 break;
Guennadi Liakhovetskiebc087d2011-08-31 06:51:10 -03001614 default:
1615 WARN(1, "Invalid queue type\n");
1616 ret = -EINVAL;
1617 }
1618
Guennadi Liakhovetskiebc087d2011-08-31 06:51:10 -03001619 if (ret)
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001620 dprintk(1, "buffer preparation failed: %d\n", ret);
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001621 vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;
Guennadi Liakhovetskiebc087d2011-08-31 06:51:10 -03001622
1623 return ret;
1624}
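
/*
 * Illustrative sketch, not part of the original file: the wait_prepare and
 * wait_finish callbacks invoked around down_read(mmap_sem) above are
 * typically implemented by dropping and retaking the lock that serializes
 * the driver's ioctls. For a hypothetical driver "foo" keeping its state in
 * q->drv_priv this could look like:
 *
 *	static void foo_wait_prepare(struct vb2_queue *q)
 *	{
 *		struct foo_dev *dev = vb2_get_drv_priv(q);
 *
 *		mutex_unlock(&dev->lock);
 *	}
 *
 *	static void foo_wait_finish(struct vb2_queue *q)
 *	{
 *		struct foo_dev *dev = vb2_get_drv_priv(q);
 *
 *		mutex_lock(&dev->lock);
 *	}
 *
 * The struct foo_dev type and its 'lock' mutex are assumptions made for the
 * example only.
 */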
1625
Laurent Pinchart012043b2013-08-09 08:11:26 -03001626static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
Hans Verkuil41381112013-12-13 13:13:39 -03001627 const char *opname)
Laurent Pinchart012043b2013-08-09 08:11:26 -03001628{
Laurent Pinchart012043b2013-08-09 08:11:26 -03001629 if (b->type != q->type) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001630 dprintk(1, "%s: invalid buffer type\n", opname);
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001631 return -EINVAL;
Laurent Pinchart012043b2013-08-09 08:11:26 -03001632 }
1633
1634 if (b->index >= q->num_buffers) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001635 dprintk(1, "%s: buffer index out of range\n", opname);
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001636 return -EINVAL;
Laurent Pinchart012043b2013-08-09 08:11:26 -03001637 }
1638
Hans Verkuil41381112013-12-13 13:13:39 -03001639 if (q->bufs[b->index] == NULL) {
Laurent Pinchart012043b2013-08-09 08:11:26 -03001640 /* Should never happen */
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001641 dprintk(1, "%s: buffer is NULL\n", opname);
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001642 return -EINVAL;
Laurent Pinchart012043b2013-08-09 08:11:26 -03001643 }
1644
1645 if (b->memory != q->memory) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001646 dprintk(1, "%s: invalid memory type\n", opname);
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001647 return -EINVAL;
Laurent Pinchart012043b2013-08-09 08:11:26 -03001648 }
1649
Hans Verkuil41381112013-12-13 13:13:39 -03001650 return __verify_planes_array(q->bufs[b->index], b);
Laurent Pinchart012043b2013-08-09 08:11:26 -03001651}
1652
Pawel Osciake23ccc02010-10-11 10:56:41 -03001653/**
Guennadi Liakhovetski2d864012011-09-28 09:23:02 -03001654 * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
1655 * @q: videobuf2 queue
1656 * @b: buffer structure passed from userspace to vidioc_prepare_buf
1657 * handler in driver
1658 *
1659 * Should be called from vidioc_prepare_buf ioctl handler of a driver.
1660 * This function:
1661 * 1) verifies the passed buffer,
1662 * 2) calls buf_prepare callback in the driver (if provided), in which
1663 * driver-specific buffer initialization can be performed,
1664 *
1665 * The return values from this function are intended to be directly returned
1666 * from vidioc_prepare_buf handler in driver.
1667 */
1668int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
1669{
Hans Verkuil41381112013-12-13 13:13:39 -03001670 struct vb2_buffer *vb;
Hans Verkuilb2f2f042013-12-13 13:13:41 -03001671 int ret;
Hans Verkuil41381112013-12-13 13:13:39 -03001672
Hans Verkuil74753cffa2014-04-07 09:23:50 -03001673 if (vb2_fileio_is_active(q)) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001674 dprintk(1, "file io in progress\n");
Hans Verkuilb2f2f042013-12-13 13:13:41 -03001675 return -EBUSY;
1676 }
1677
1678 ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf");
Hans Verkuil41381112013-12-13 13:13:39 -03001679 if (ret)
1680 return ret;
1681
1682 vb = q->bufs[b->index];
1683 if (vb->state != VB2_BUF_STATE_DEQUEUED) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001684 dprintk(1, "invalid buffer state %d\n",
Hans Verkuil41381112013-12-13 13:13:39 -03001685 vb->state);
1686 return -EINVAL;
1687 }
1688
1689 ret = __buf_prepare(vb, b);
1690 if (!ret) {
1691 /* Fill buffer information for the userspace */
1692 __fill_v4l2_buffer(vb, b);
1693
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001694 dprintk(1, "prepare of buffer %d succeeded\n", vb->v4l2_buf.index);
Hans Verkuil41381112013-12-13 13:13:39 -03001695 }
1696 return ret;
Guennadi Liakhovetski2d864012011-09-28 09:23:02 -03001697}
1698EXPORT_SYMBOL_GPL(vb2_prepare_buf);
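
/*
 * Illustrative sketch, not part of the original file: a driver's
 * vidioc_prepare_buf handler usually just forwards to vb2_prepare_buf().
 * The "foo" names below are assumptions for the example.
 *
 *	static int foo_prepare_buf(struct file *file, void *priv,
 *				   struct v4l2_buffer *b)
 *	{
 *		struct foo_dev *dev = video_drvdata(file);
 *
 *		return vb2_prepare_buf(&dev->queue, b);
 *	}
 */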
1699
Hans Verkuil02f142e2013-12-13 13:13:42 -03001700/**
1701 * vb2_start_streaming() - Attempt to start streaming.
1702 * @q: videobuf2 queue
1703 *
Hans Verkuilb3379c62014-02-24 13:51:03 -03001704 * Attempt to start streaming. When this function is called there must be
1705 * at least q->min_buffers_needed buffers queued up (i.e. the minimum
1706 * number of buffers required for the DMA engine to function). If the
1707 * @start_streaming op fails it is supposed to return all the driver-owned
1708 * buffers back to vb2 in state QUEUED. Check if that happened and if
1709 * not warn and reclaim them forcefully.
Hans Verkuil02f142e2013-12-13 13:13:42 -03001710 */
1711static int vb2_start_streaming(struct vb2_queue *q)
1712{
Hans Verkuilb3379c62014-02-24 13:51:03 -03001713 struct vb2_buffer *vb;
Hans Verkuil02f142e2013-12-13 13:13:42 -03001714 int ret;
1715
Hans Verkuil02f142e2013-12-13 13:13:42 -03001716 /*
Hans Verkuilb3379c62014-02-24 13:51:03 -03001717 * If any buffers were queued before streamon,
1718 * we can now pass them to driver for processing.
Hans Verkuil02f142e2013-12-13 13:13:42 -03001719 */
Hans Verkuilb3379c62014-02-24 13:51:03 -03001720 list_for_each_entry(vb, &q->queued_list, queued_entry)
1721 __enqueue_in_driver(vb);
1722
1723 /* Tell the driver to start streaming */
1724 ret = call_qop(q, start_streaming, q,
1725 atomic_read(&q->owned_by_drv_count));
1726 q->start_streaming_called = ret == 0;
1727 if (!ret)
Hans Verkuil02f142e2013-12-13 13:13:42 -03001728 return 0;
Hans Verkuilb3379c62014-02-24 13:51:03 -03001729
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001730 dprintk(1, "driver refused to start streaming\n");
Hans Verkuilb3379c62014-02-24 13:51:03 -03001731 if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
1732 unsigned i;
1733
1734 /*
1735 * Forcefully reclaim buffers if the driver did not
1736 * correctly return them to vb2.
1737 */
1738 for (i = 0; i < q->num_buffers; ++i) {
1739 vb = q->bufs[i];
1740 if (vb->state == VB2_BUF_STATE_ACTIVE)
1741 vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
1742 }
1743 /* Must be zero now */
1744 WARN_ON(atomic_read(&q->owned_by_drv_count));
Hans Verkuil02f142e2013-12-13 13:13:42 -03001745 }
Hans Verkuil02f142e2013-12-13 13:13:42 -03001746 return ret;
1747}
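
/*
 * Illustrative sketch, not part of the original file: a start_streaming
 * implementation that fails is expected to give back every buffer it was
 * handed via buf_queue in the QUEUED state, e.g. for a hypothetical driver
 * "foo" that keeps pending buffers on dev->buf_list:
 *
 *	static int foo_start_streaming(struct vb2_queue *q, unsigned int count)
 *	{
 *		struct foo_dev *dev = vb2_get_drv_priv(q);
 *		int ret = foo_hw_start(dev);
 *
 *		if (ret) {
 *			struct foo_buffer *buf, *tmp;
 *
 *			list_for_each_entry_safe(buf, tmp, &dev->buf_list, list) {
 *				list_del(&buf->list);
 *				vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
 *			}
 *		}
 *		return ret;
 *	}
 *
 * struct foo_dev, struct foo_buffer and foo_hw_start() are assumptions made
 * for the example only.
 */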
1748
Hans Verkuilb2f2f042013-12-13 13:13:41 -03001749static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
Laurent Pinchart012043b2013-08-09 08:11:26 -03001750{
Hans Verkuil41381112013-12-13 13:13:39 -03001751 int ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
1752 struct vb2_buffer *vb;
1753
1754 if (ret)
1755 return ret;
1756
1757 vb = q->bufs[b->index];
Laurent Pinchart012043b2013-08-09 08:11:26 -03001758
1759 switch (vb->state) {
1760 case VB2_BUF_STATE_DEQUEUED:
1761 ret = __buf_prepare(vb, b);
1762 if (ret)
1763 return ret;
Hans Verkuil41381112013-12-13 13:13:39 -03001764 break;
Laurent Pinchart012043b2013-08-09 08:11:26 -03001765 case VB2_BUF_STATE_PREPARED:
1766 break;
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001767 case VB2_BUF_STATE_PREPARING:
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001768 dprintk(1, "buffer still being prepared\n");
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001769 return -EINVAL;
Laurent Pinchart012043b2013-08-09 08:11:26 -03001770 default:
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001771 dprintk(1, "invalid buffer state %d\n", vb->state);
Laurent Pinchart012043b2013-08-09 08:11:26 -03001772 return -EINVAL;
1773 }
1774
1775 /*
1776 * Add to the queued buffers list, a buffer will stay on it until
1777 * dequeued in dqbuf.
1778 */
1779 list_add_tail(&vb->queued_entry, &q->queued_list);
Hans Verkuilb3379c62014-02-24 13:51:03 -03001780 q->queued_count++;
Laurent Pinchart012043b2013-08-09 08:11:26 -03001781 vb->state = VB2_BUF_STATE_QUEUED;
Hans Verkuilf1343282014-02-24 14:44:50 -03001782 if (V4L2_TYPE_IS_OUTPUT(q->type)) {
1783 /*
1784 * For output buffers copy the timestamp if needed,
1785 * and the timecode field and flag if needed.
1786 */
Sakari Ailusc57ff792014-03-01 10:28:02 -03001787 if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
1788 V4L2_BUF_FLAG_TIMESTAMP_COPY)
Hans Verkuilf1343282014-02-24 14:44:50 -03001789 vb->v4l2_buf.timestamp = b->timestamp;
1790 vb->v4l2_buf.flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
1791 if (b->flags & V4L2_BUF_FLAG_TIMECODE)
1792 vb->v4l2_buf.timecode = b->timecode;
1793 }
Laurent Pinchart012043b2013-08-09 08:11:26 -03001794
1795 /*
1796 * If already streaming, give the buffer to driver for processing.
1797 * If not, the buffer will be given to driver on next streamon.
1798 */
Hans Verkuilb3379c62014-02-24 13:51:03 -03001799 if (q->start_streaming_called)
Laurent Pinchart012043b2013-08-09 08:11:26 -03001800 __enqueue_in_driver(vb);
1801
Hans Verkuil41381112013-12-13 13:13:39 -03001802 /* Fill buffer information for the userspace */
1803 __fill_v4l2_buffer(vb, b);
Laurent Pinchart012043b2013-08-09 08:11:26 -03001804
Hans Verkuilb3379c62014-02-24 13:51:03 -03001805 /*
1806 * If streamon has been called, and we haven't yet called
1807 * start_streaming() since not enough buffers were queued, and
1808 * we now have reached the minimum number of queued buffers,
1809 * then we can finally call start_streaming().
1810 */
1811 if (q->streaming && !q->start_streaming_called &&
1812 q->queued_count >= q->min_buffers_needed) {
Hans Verkuil02f142e2013-12-13 13:13:42 -03001813 ret = vb2_start_streaming(q);
1814 if (ret)
1815 return ret;
1816 }
1817
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001818 dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);
Hans Verkuil41381112013-12-13 13:13:39 -03001819 return 0;
Pawel Osciake23ccc02010-10-11 10:56:41 -03001820}
Hans Verkuilb2f2f042013-12-13 13:13:41 -03001821
1822/**
1823 * vb2_qbuf() - Queue a buffer from userspace
1824 * @q: videobuf2 queue
1825 * @b: buffer structure passed from userspace to vidioc_qbuf handler
1826 * in driver
1827 *
1828 * Should be called from vidioc_qbuf ioctl handler of a driver.
1829 * This function:
1830 * 1) verifies the passed buffer,
1831 * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
1832 * which driver-specific buffer initialization can be performed,
1833 * 3) if streaming is on, queues the buffer in driver by the means of buf_queue
1834 * callback for processing.
1835 *
1836 * The return values from this function are intended to be directly returned
1837 * from vidioc_qbuf handler in driver.
1838 */
1839int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
1840{
Hans Verkuil74753cffa2014-04-07 09:23:50 -03001841 if (vb2_fileio_is_active(q)) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001842 dprintk(1, "file io in progress\n");
Hans Verkuilb2f2f042013-12-13 13:13:41 -03001843 return -EBUSY;
1844 }
1845
1846 return vb2_internal_qbuf(q, b);
1847}
Pawel Osciake23ccc02010-10-11 10:56:41 -03001848EXPORT_SYMBOL_GPL(vb2_qbuf);
1849
1850/**
1851 * __vb2_wait_for_done_vb() - wait for a buffer to become available
1852 * for dequeuing
1853 *
1854 * Will sleep if required when nonblocking == false.
1855 */
1856static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
1857{
1858 /*
1859 * All operations on vb_done_list are performed under done_lock
1860 * spinlock protection. However, buffers may be removed from
1861 * it and returned to userspace only while holding both driver's
1862 * lock and the done_lock spinlock. Thus we can be sure that as
1863 * long as we hold the driver's lock, the list will remain not
1864 * empty if list_empty() check succeeds.
1865 */
1866
1867 for (;;) {
1868 int ret;
1869
1870 if (!q->streaming) {
1871 dprintk(1, "Streaming off, will not wait for buffers\n");
1872 return -EINVAL;
1873 }
1874
1875 if (!list_empty(&q->done_list)) {
1876 /*
1877 * Found a buffer that we were waiting for.
1878 */
1879 break;
1880 }
1881
1882 if (nonblocking) {
1883 dprintk(1, "Nonblocking and no buffers to dequeue, "
1884 "will not wait\n");
1885 return -EAGAIN;
1886 }
1887
1888 /*
1889 * We are streaming and blocking, wait for another buffer to
1890 * become ready or for streamoff. Driver's lock is released to
1891 * allow streamoff or qbuf to be called while waiting.
1892 */
Hans Verkuila1d36d82014-03-17 09:54:21 -03001893 call_void_qop(q, wait_prepare, q);
Pawel Osciake23ccc02010-10-11 10:56:41 -03001894
1895 /*
1896 * All locks have been released, it is safe to sleep now.
1897 */
1898 dprintk(3, "Will sleep waiting for buffers\n");
1899 ret = wait_event_interruptible(q->done_wq,
1900 !list_empty(&q->done_list) || !q->streaming);
1901
1902 /*
1903 * We need to reevaluate both conditions again after reacquiring
1904 * the locks or return an error if one occurred.
1905 */
Hans Verkuila1d36d82014-03-17 09:54:21 -03001906 call_void_qop(q, wait_finish, q);
Hans Verkuil32a77262012-09-28 06:12:53 -03001907 if (ret) {
1908 dprintk(1, "Sleep was interrupted\n");
Pawel Osciake23ccc02010-10-11 10:56:41 -03001909 return ret;
Hans Verkuil32a77262012-09-28 06:12:53 -03001910 }
Pawel Osciake23ccc02010-10-11 10:56:41 -03001911 }
1912 return 0;
1913}
1914
1915/**
1916 * __vb2_get_done_vb() - get a buffer ready for dequeuing
1917 *
1918 * Will sleep if required when nonblocking == false.
1919 */
1920static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
Hans Verkuil32a77262012-09-28 06:12:53 -03001921 struct v4l2_buffer *b, int nonblocking)
Pawel Osciake23ccc02010-10-11 10:56:41 -03001922{
1923 unsigned long flags;
1924 int ret;
1925
1926 /*
1927 * Wait for at least one buffer to become available on the done_list.
1928 */
1929 ret = __vb2_wait_for_done_vb(q, nonblocking);
1930 if (ret)
1931 return ret;
1932
1933 /*
1934 * Driver's lock has been held since we last verified that done_list
1935 * is not empty, so no need for another list_empty(done_list) check.
1936 */
1937 spin_lock_irqsave(&q->done_lock, flags);
1938 *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
Hans Verkuil32a77262012-09-28 06:12:53 -03001939 /*
1940 * Only remove the buffer from done_list if v4l2_buffer can handle all
1941 * the planes.
1942 */
1943 ret = __verify_planes_array(*vb, b);
1944 if (!ret)
1945 list_del(&(*vb)->done_entry);
Pawel Osciake23ccc02010-10-11 10:56:41 -03001946 spin_unlock_irqrestore(&q->done_lock, flags);
1947
Hans Verkuil32a77262012-09-28 06:12:53 -03001948 return ret;
Pawel Osciake23ccc02010-10-11 10:56:41 -03001949}
1950
1951/**
1952 * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
1953 * @q: videobuf2 queue
1954 *
1955 * This function will wait until all buffers that have been given to the driver
1956 * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call
1957 * wait_prepare, wait_finish pair. It is intended to be called with all locks
1958 * taken, for example from stop_streaming() callback.
1959 */
1960int vb2_wait_for_all_buffers(struct vb2_queue *q)
1961{
1962 if (!q->streaming) {
1963 dprintk(1, "Streaming off, will not wait for buffers\n");
1964 return -EINVAL;
1965 }
1966
Hans Verkuilb3379c62014-02-24 13:51:03 -03001967 if (q->start_streaming_called)
Hans Verkuil6ea3b982014-02-06 05:46:11 -03001968 wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
Pawel Osciake23ccc02010-10-11 10:56:41 -03001969 return 0;
1970}
1971EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
1972
1973/**
Sumit Semwalc5384042012-06-14 10:37:37 -03001974 * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
1975 */
1976static void __vb2_dqbuf(struct vb2_buffer *vb)
1977{
1978 struct vb2_queue *q = vb->vb2_queue;
1979 unsigned int i;
1980
1981 /* nothing to do if the buffer is already dequeued */
1982 if (vb->state == VB2_BUF_STATE_DEQUEUED)
1983 return;
1984
1985 vb->state = VB2_BUF_STATE_DEQUEUED;
1986
1987 /* unmap DMABUF buffer */
1988 if (q->memory == V4L2_MEMORY_DMABUF)
1989 for (i = 0; i < vb->num_planes; ++i) {
1990 if (!vb->planes[i].dbuf_mapped)
1991 continue;
Hans Verkuila1d36d82014-03-17 09:54:21 -03001992 call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
Sumit Semwalc5384042012-06-14 10:37:37 -03001993 vb->planes[i].dbuf_mapped = 0;
1994 }
1995}
1996
Hans Verkuilb2f2f042013-12-13 13:13:41 -03001997static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
Pawel Osciake23ccc02010-10-11 10:56:41 -03001998{
1999 struct vb2_buffer *vb = NULL;
2000 int ret;
2001
2002 if (b->type != q->type) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002003 dprintk(1, "invalid buffer type\n");
Pawel Osciake23ccc02010-10-11 10:56:41 -03002004 return -EINVAL;
2005 }
Hans Verkuil32a77262012-09-28 06:12:53 -03002006 ret = __vb2_get_done_vb(q, &vb, b, nonblocking);
2007 if (ret < 0)
Pawel Osciake23ccc02010-10-11 10:56:41 -03002008 return ret;
Pawel Osciake23ccc02010-10-11 10:56:41 -03002009
Pawel Osciake23ccc02010-10-11 10:56:41 -03002010 switch (vb->state) {
2011 case VB2_BUF_STATE_DONE:
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002012 dprintk(3, "Returning done buffer\n");
Pawel Osciake23ccc02010-10-11 10:56:41 -03002013 break;
2014 case VB2_BUF_STATE_ERROR:
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002015 dprintk(3, "Returning done buffer with errors\n");
Pawel Osciake23ccc02010-10-11 10:56:41 -03002016 break;
2017 default:
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002018 dprintk(1, "Invalid buffer state\n");
Pawel Osciake23ccc02010-10-11 10:56:41 -03002019 return -EINVAL;
2020 }
2021
Hans Verkuila1d36d82014-03-17 09:54:21 -03002022 call_void_vb_qop(vb, buf_finish, vb);
Hans Verkuil9cf3c312014-02-28 13:30:48 -03002023
Pawel Osciake23ccc02010-10-11 10:56:41 -03002024 /* Fill buffer information for the userspace */
2025 __fill_v4l2_buffer(vb, b);
2026 /* Remove from videobuf queue */
2027 list_del(&vb->queued_entry);
Hans Verkuilb3379c62014-02-24 13:51:03 -03002028 q->queued_count--;
Sumit Semwalc5384042012-06-14 10:37:37 -03002029 /* go back to dequeued state */
2030 __vb2_dqbuf(vb);
Pawel Osciake23ccc02010-10-11 10:56:41 -03002031
2032 dprintk(1, "dqbuf of buffer %d, with state %d\n",
2033 vb->v4l2_buf.index, vb->state);
2034
Pawel Osciake23ccc02010-10-11 10:56:41 -03002035 return 0;
2036}
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002037
2038/**
2039 * vb2_dqbuf() - Dequeue a buffer to the userspace
2040 * @q: videobuf2 queue
2041 * @b: buffer structure passed from userspace to vidioc_dqbuf handler
2042 * in driver
2043 * @nonblocking: if true, this call will not sleep waiting for a buffer if no
2044 * buffers ready for dequeuing are present. Normally the driver
2045 * would be passing (file->f_flags & O_NONBLOCK) here
2046 *
2047 * Should be called from vidioc_dqbuf ioctl handler of a driver.
2048 * This function:
2049 * 1) verifies the passed buffer,
2050 * 2) calls buf_finish callback in the driver (if provided), in which
2051 * driver can perform any additional operations that may be required before
2052 * returning the buffer to userspace, such as cache sync,
2053 * 3) the buffer struct members are filled with relevant information for
2054 * the userspace.
2055 *
2056 * The return values from this function are intended to be directly returned
2057 * from vidioc_dqbuf handler in driver.
2058 */
2059int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
2060{
Hans Verkuil74753cffa2014-04-07 09:23:50 -03002061 if (vb2_fileio_is_active(q)) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002062 dprintk(1, "file io in progress\n");
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002063 return -EBUSY;
2064 }
2065 return vb2_internal_dqbuf(q, b, nonblocking);
2066}
Pawel Osciake23ccc02010-10-11 10:56:41 -03002067EXPORT_SYMBOL_GPL(vb2_dqbuf);
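
/*
 * Illustrative sketch, not part of the original file: typical qbuf/dqbuf
 * ioctl handlers simply forward to vb2_qbuf()/vb2_dqbuf(), passing the
 * file's O_NONBLOCK flag as the nonblocking argument. The "foo" names are
 * assumptions for the example.
 *
 *	static int foo_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
 *	{
 *		struct foo_dev *dev = video_drvdata(file);
 *
 *		return vb2_qbuf(&dev->queue, b);
 *	}
 *
 *	static int foo_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
 *	{
 *		struct foo_dev *dev = video_drvdata(file);
 *
 *		return vb2_dqbuf(&dev->queue, b, file->f_flags & O_NONBLOCK);
 *	}
 */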
2068
2069/**
Pawel Osciake23ccc02010-10-11 10:56:41 -03002070 * __vb2_queue_cancel() - cancel and stop (pause) streaming
2071 *
2072 * Removes all queued buffers from driver's queue and all buffers queued by
2073 * userspace from videobuf's queue. Returns to state after reqbufs.
2074 */
2075static void __vb2_queue_cancel(struct vb2_queue *q)
2076{
2077 unsigned int i;
2078
2079 /*
2080 * Tell driver to stop all transactions and release all queued
2081 * buffers.
2082 */
Hans Verkuilb3379c62014-02-24 13:51:03 -03002083 if (q->start_streaming_called)
Pawel Osciake23ccc02010-10-11 10:56:41 -03002084 call_qop(q, stop_streaming, q);
2085 q->streaming = 0;
Hans Verkuilb3379c62014-02-24 13:51:03 -03002086 q->start_streaming_called = 0;
2087 q->queued_count = 0;
2088
2089 if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
2090 for (i = 0; i < q->num_buffers; ++i)
2091 if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE)
2092 vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
2093 /* Must be zero now */
2094 WARN_ON(atomic_read(&q->owned_by_drv_count));
2095 }
Pawel Osciake23ccc02010-10-11 10:56:41 -03002096
2097 /*
2098 * Remove all buffers from videobuf's list...
2099 */
2100 INIT_LIST_HEAD(&q->queued_list);
2101 /*
2102 * ...and done list; userspace will not receive any buffers it
2103 * has not already dequeued before initiating cancel.
2104 */
2105 INIT_LIST_HEAD(&q->done_list);
Hans Verkuil6ea3b982014-02-06 05:46:11 -03002106 atomic_set(&q->owned_by_drv_count, 0);
Pawel Osciake23ccc02010-10-11 10:56:41 -03002107 wake_up_all(&q->done_wq);
2108
2109 /*
2110 * Reinitialize all buffers for next use.
Hans Verkuil9c0863b2014-03-04 07:34:49 -03002111 * Make sure to call buf_finish for any queued buffers. Normally
2112 * that's done in dqbuf, but that's not going to happen when we
2113 * cancel the whole queue. Note: this code belongs here, not in
2114 * __vb2_dqbuf() since in vb2_internal_dqbuf() there is a critical
2115 * call to __fill_v4l2_buffer() after buf_finish(). That order can't
2116 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
Pawel Osciake23ccc02010-10-11 10:56:41 -03002117 */
Hans Verkuil9c0863b2014-03-04 07:34:49 -03002118 for (i = 0; i < q->num_buffers; ++i) {
2119 struct vb2_buffer *vb = q->bufs[i];
2120
2121 if (vb->state != VB2_BUF_STATE_DEQUEUED) {
2122 vb->state = VB2_BUF_STATE_PREPARED;
Hans Verkuila1d36d82014-03-17 09:54:21 -03002123 call_void_vb_qop(vb, buf_finish, vb);
Hans Verkuil9c0863b2014-03-04 07:34:49 -03002124 }
2125 __vb2_dqbuf(vb);
2126 }
Pawel Osciake23ccc02010-10-11 10:56:41 -03002127}
2128
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002129static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002130{
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002131 int ret;
2132
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002133 if (type != q->type) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002134 dprintk(1, "invalid stream type\n");
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002135 return -EINVAL;
2136 }
2137
2138 if (q->streaming) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002139 dprintk(3, "already streaming\n");
Ricardo Ribaldaf9560352013-11-08 07:08:45 -03002140 return 0;
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002141 }
2142
Ricardo Ribalda548df782014-01-08 05:01:33 -03002143 if (!q->num_buffers) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002144 dprintk(1, "no buffers have been allocated\n");
Ricardo Ribalda548df782014-01-08 05:01:33 -03002145 return -EINVAL;
2146 }
2147
Hans Verkuilb3379c62014-02-24 13:51:03 -03002152 if (q->num_buffers < q->min_buffers_needed) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002153 dprintk(1, "need at least %u allocated buffers\n",
Hans Verkuilb3379c62014-02-24 13:51:03 -03002154 q->min_buffers_needed);
2155 return -EINVAL;
2156 }
Ricardo Ribalda Delgado249f5a52014-01-08 05:01:33 -03002157
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002158 /*
Hans Verkuilb3379c62014-02-24 13:51:03 -03002159 * Tell driver to start streaming provided sufficient buffers
2160 * are available.
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002161 */
Hans Verkuilb3379c62014-02-24 13:51:03 -03002162 if (q->queued_count >= q->min_buffers_needed) {
2163 ret = vb2_start_streaming(q);
2164 if (ret) {
2165 __vb2_queue_cancel(q);
2166 return ret;
2167 }
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002168 }
2169
2170 q->streaming = 1;
2171
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002172 dprintk(3, "successful\n");
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002173 return 0;
2174}
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002175
2176/**
2177 * vb2_streamon - start streaming
2178 * @q: videobuf2 queue
2179 * @type: type argument passed from userspace to vidioc_streamon handler
2180 *
2181 * Should be called from vidioc_streamon handler of a driver.
2182 * This function:
2183 * 1) verifies current state
2184 * 2) passes any previously queued buffers to the driver and starts streaming
2185 *
2186 * The return values from this function are intended to be directly returned
2187 * from vidioc_streamon handler in the driver.
2188 */
2189int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
2190{
Hans Verkuil74753cffa2014-04-07 09:23:50 -03002191 if (vb2_fileio_is_active(q)) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002192 dprintk(1, "file io in progress\n");
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002193 return -EBUSY;
2194 }
2195 return vb2_internal_streamon(q, type);
2196}
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002197EXPORT_SYMBOL_GPL(vb2_streamon);
2198
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002199static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
2200{
2201 if (type != q->type) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002202 dprintk(1, "invalid stream type\n");
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002203 return -EINVAL;
2204 }
2205
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002206 /*
2207 * Cancel will pause streaming and remove all buffers from the driver
2208 * and videobuf, effectively returning control over them to userspace.
Hans Verkuil3f1a9a32014-02-25 09:42:45 -03002209 *
2210 * Note that we do this even if q->streaming == 0: if you prepare or
2211 * queue buffers, and then call streamoff without ever having called
2212 * streamon, you would still expect those buffers to be returned to
2213 * their normal dequeued state.
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002214 */
2215 __vb2_queue_cancel(q);
2216
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002217 dprintk(3, "successful\n");
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002218 return 0;
2219}
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002220
2221/**
Pawel Osciake23ccc02010-10-11 10:56:41 -03002222 * vb2_streamoff - stop streaming
2223 * @q: videobuf2 queue
2224 * @type: type argument passed from userspace to vidioc_streamoff handler
2225 *
2226 * Should be called from vidioc_streamoff handler of a driver.
2227 * This function:
2228 * 1) verifies current state,
2229 * 2) stop streaming and dequeues any queued buffers, including those previously
2230 * passed to the driver (after waiting for the driver to finish).
2231 *
2232 * This call can be used for pausing playback.
2233 * The return values from this function are intended to be directly returned
2234 * from vidioc_streamoff handler in the driver
2235 */
2236int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
2237{
Hans Verkuil74753cffa2014-04-07 09:23:50 -03002238 if (vb2_fileio_is_active(q)) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002239 dprintk(1, "file io in progress\n");
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002240 return -EBUSY;
2241 }
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002242 return vb2_internal_streamoff(q, type);
Pawel Osciake23ccc02010-10-11 10:56:41 -03002243}
2244EXPORT_SYMBOL_GPL(vb2_streamoff);
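
/*
 * Illustrative sketch, not part of the original file: matching streamon and
 * streamoff ioctl handlers in a hypothetical driver "foo":
 *
 *	static int foo_streamon(struct file *file, void *priv,
 *				enum v4l2_buf_type type)
 *	{
 *		struct foo_dev *dev = video_drvdata(file);
 *
 *		return vb2_streamon(&dev->queue, type);
 *	}
 *
 *	static int foo_streamoff(struct file *file, void *priv,
 *				 enum v4l2_buf_type type)
 *	{
 *		struct foo_dev *dev = video_drvdata(file);
 *
 *		return vb2_streamoff(&dev->queue, type);
 *	}
 */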
2245
2246/**
2247 * __find_plane_by_offset() - find plane associated with the given offset off
2248 */
2249static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
2250 unsigned int *_buffer, unsigned int *_plane)
2251{
2252 struct vb2_buffer *vb;
2253 unsigned int buffer, plane;
2254
2255 /*
2256 * Go over all buffers and their planes, comparing the given offset
2257 * with an offset assigned to each plane. If a match is found,
2258 * return its buffer and plane numbers.
2259 */
2260 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
2261 vb = q->bufs[buffer];
2262
2263 for (plane = 0; plane < vb->num_planes; ++plane) {
2264 if (vb->v4l2_planes[plane].m.mem_offset == off) {
2265 *_buffer = buffer;
2266 *_plane = plane;
2267 return 0;
2268 }
2269 }
2270 }
2271
2272 return -EINVAL;
2273}
2274
2275/**
Tomasz Stanislawski83ae7c52012-06-14 11:32:24 -03002276 * vb2_expbuf() - Export a buffer as a file descriptor
2277 * @q: videobuf2 queue
2278 * @eb: export buffer structure passed from userspace to vidioc_expbuf
2279 * handler in driver
2280 *
2281 * The return values from this function are intended to be directly returned
2282 * from vidioc_expbuf handler in driver.
2283 */
2284int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
2285{
2286 struct vb2_buffer *vb = NULL;
2287 struct vb2_plane *vb_plane;
2288 int ret;
2289 struct dma_buf *dbuf;
2290
2291 if (q->memory != V4L2_MEMORY_MMAP) {
2292 dprintk(1, "Queue is not currently set up for mmap\n");
2293 return -EINVAL;
2294 }
2295
2296 if (!q->mem_ops->get_dmabuf) {
2297 dprintk(1, "Queue does not support DMA buffer exporting\n");
2298 return -EINVAL;
2299 }
2300
Philipp Zabelea3aba82013-05-21 05:11:35 -03002301 if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) {
2302 dprintk(1, "Queue supports only O_CLOEXEC and access mode flags\n");
Tomasz Stanislawski83ae7c52012-06-14 11:32:24 -03002303 return -EINVAL;
2304 }
2305
2306 if (eb->type != q->type) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002307 dprintk(1, "invalid buffer type\n");
Tomasz Stanislawski83ae7c52012-06-14 11:32:24 -03002308 return -EINVAL;
2309 }
2310
2311 if (eb->index >= q->num_buffers) {
2312 dprintk(1, "buffer index out of range\n");
2313 return -EINVAL;
2314 }
2315
2316 vb = q->bufs[eb->index];
2317
2318 if (eb->plane >= vb->num_planes) {
2319 dprintk(1, "buffer plane out of range\n");
2320 return -EINVAL;
2321 }
2322
Hans Verkuil74753cffa2014-04-07 09:23:50 -03002323 if (vb2_fileio_is_active(q)) {
2324 dprintk(1, "expbuf: file io in progress\n");
2325 return -EBUSY;
2326 }
2327
Tomasz Stanislawski83ae7c52012-06-14 11:32:24 -03002328 vb_plane = &vb->planes[eb->plane];
2329
Hans Verkuila1d36d82014-03-17 09:54:21 -03002330 dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
Tomasz Stanislawski83ae7c52012-06-14 11:32:24 -03002331 if (IS_ERR_OR_NULL(dbuf)) {
2332 dprintk(1, "Failed to export buffer %d, plane %d\n",
2333 eb->index, eb->plane);
2334 return -EINVAL;
2335 }
2336
Philipp Zabelea3aba82013-05-21 05:11:35 -03002337 ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE);
Tomasz Stanislawski83ae7c52012-06-14 11:32:24 -03002338 if (ret < 0) {
2339 dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
2340 eb->index, eb->plane, ret);
2341 dma_buf_put(dbuf);
2342 return ret;
2343 }
2344
2345 dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
2346 eb->index, eb->plane, ret);
2347 eb->fd = ret;
2348
2349 return 0;
2350}
2351EXPORT_SYMBOL_GPL(vb2_expbuf);
2352
2353/**
Pawel Osciake23ccc02010-10-11 10:56:41 -03002354 * vb2_mmap() - map video buffers into application address space
2355 * @q: videobuf2 queue
2356 * @vma: vma passed to the mmap file operation handler in the driver
2357 *
2358 * Should be called from mmap file operation handler of a driver.
2359 * This function maps one plane of one of the available video buffers to
2360 * userspace. To map whole video memory allocated on reqbufs, this function
2361 * has to be called once per each plane per each buffer previously allocated.
2362 *
2363 * When the userspace application calls mmap, it passes to it an offset returned
2364 * to it earlier by the means of vidioc_querybuf handler. That offset acts as
2365 * a "cookie", which is then used to identify the plane to be mapped.
2366 * This function finds a plane with a matching offset and a mapping is performed
2367 * by the means of a provided memory operation.
2368 *
2369 * The return values from this function are intended to be directly returned
2370 * from the mmap handler in driver.
2371 */
2372int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
2373{
2374 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
Pawel Osciake23ccc02010-10-11 10:56:41 -03002375 struct vb2_buffer *vb;
2376 unsigned int buffer, plane;
2377 int ret;
Mauro Carvalho Chehab7f841452013-04-19 07:18:01 -03002378 unsigned long length;
Pawel Osciake23ccc02010-10-11 10:56:41 -03002379
2380 if (q->memory != V4L2_MEMORY_MMAP) {
2381 dprintk(1, "Queue is not currently set up for mmap\n");
2382 return -EINVAL;
2383 }
2384
2385 /*
2386 * Check memory area access mode.
2387 */
2388 if (!(vma->vm_flags & VM_SHARED)) {
2389 dprintk(1, "Invalid vma flags, VM_SHARED needed\n");
2390 return -EINVAL;
2391 }
2392 if (V4L2_TYPE_IS_OUTPUT(q->type)) {
2393 if (!(vma->vm_flags & VM_WRITE)) {
2394 dprintk(1, "Invalid vma flags, VM_WRITE needed\n");
2395 return -EINVAL;
2396 }
2397 } else {
2398 if (!(vma->vm_flags & VM_READ)) {
2399 dprintk(1, "Invalid vma flags, VM_READ needed\n");
2400 return -EINVAL;
2401 }
2402 }
Hans Verkuil74753cffa2014-04-07 09:23:50 -03002403 if (vb2_fileio_is_active(q)) {
2404 dprintk(1, "mmap: file io in progress\n");
2405 return -EBUSY;
2406 }
Pawel Osciake23ccc02010-10-11 10:56:41 -03002407
2408 /*
2409 * Find the plane corresponding to the offset passed by userspace.
2410 */
2411 ret = __find_plane_by_offset(q, off, &buffer, &plane);
2412 if (ret)
2413 return ret;
2414
2415 vb = q->bufs[buffer];
Pawel Osciake23ccc02010-10-11 10:56:41 -03002416
Mauro Carvalho Chehab7f841452013-04-19 07:18:01 -03002417 /*
2418 * MMAP requires page_aligned buffers.
2419 * The buffer length was page_aligned at __vb2_buf_mem_alloc(),
2420 * so, we need to do the same here.
2421 */
2422 length = PAGE_ALIGN(vb->v4l2_planes[plane].length);
2423 if (length < (vma->vm_end - vma->vm_start)) {
2424 dprintk(1,
2425 "MMAP invalid, as it would overflow buffer length\n");
Seung-Woo Kim068a0df2013-04-11 23:57:57 -03002426 return -EINVAL;
2427 }
2428
Hans Verkuilb5b45412014-01-29 11:53:25 -03002429 ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
Hans Verkuila1d36d82014-03-17 09:54:21 -03002430 if (ret)
Pawel Osciake23ccc02010-10-11 10:56:41 -03002431 return ret;
2432
Pawel Osciake23ccc02010-10-11 10:56:41 -03002433 dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
2434 return 0;
2435}
2436EXPORT_SYMBOL_GPL(vb2_mmap);
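
/*
 * Illustrative sketch, not part of the original file: a driver's mmap file
 * operation normally takes its serialization lock and forwards to
 * vb2_mmap(). The "foo" names are assumptions for the example.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *dev = video_drvdata(file);
 *		int ret;
 *
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;
 *		ret = vb2_mmap(&dev->queue, vma);
 *		mutex_unlock(&dev->lock);
 *		return ret;
 *	}
 */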
2437
Scott Jiang6f524ec2011-09-21 09:25:23 -03002438#ifndef CONFIG_MMU
2439unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
2440 unsigned long addr,
2441 unsigned long len,
2442 unsigned long pgoff,
2443 unsigned long flags)
2444{
2445 unsigned long off = pgoff << PAGE_SHIFT;
2446 struct vb2_buffer *vb;
2447 unsigned int buffer, plane;
2448 int ret;
2449
2450 if (q->memory != V4L2_MEMORY_MMAP) {
2451 dprintk(1, "Queue is not currently set up for mmap\n");
2452 return -EINVAL;
2453 }
2454
2455 /*
2456 * Find the plane corresponding to the offset passed by userspace.
2457 */
2458 ret = __find_plane_by_offset(q, off, &buffer, &plane);
2459 if (ret)
2460 return ret;
2461
2462 vb = q->bufs[buffer];
2463
2464 return (unsigned long)vb2_plane_vaddr(vb, plane);
2465}
2466EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
2467#endif
2468
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002469static int __vb2_init_fileio(struct vb2_queue *q, int read);
2470static int __vb2_cleanup_fileio(struct vb2_queue *q);
Pawel Osciake23ccc02010-10-11 10:56:41 -03002471
2472/**
2473 * vb2_poll() - implements poll userspace operation
2474 * @q: videobuf2 queue
2475 * @file: file argument passed to the poll file operation handler
2476 * @wait: wait argument passed to the poll file operation handler
2477 *
2478 * This function implements poll file operation handler for a driver.
2479 * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
2480 * be informed that the file descriptor of a video device is available for
2481 * reading.
2482 * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
2483 * will be reported as available for writing.
2484 *
Hans Verkuil95213ce2011-07-13 04:26:52 -03002485 * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
2486 * pending events.
2487 *
Pawel Osciake23ccc02010-10-11 10:56:41 -03002488 * The return values from this function are intended to be directly returned
2489 * from poll handler in driver.
2490 */
2491unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
2492{
Hans Verkuil95213ce2011-07-13 04:26:52 -03002493 struct video_device *vfd = video_devdata(file);
Hans Verkuilbf5c7cb2011-07-13 04:01:30 -03002494 unsigned long req_events = poll_requested_events(wait);
Pawel Osciake23ccc02010-10-11 10:56:41 -03002495 struct vb2_buffer *vb = NULL;
Hans Verkuil95213ce2011-07-13 04:26:52 -03002496 unsigned int res = 0;
2497 unsigned long flags;
2498
2499 if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
2500 struct v4l2_fh *fh = file->private_data;
2501
2502 if (v4l2_event_pending(fh))
2503 res = POLLPRI;
2504 else if (req_events & POLLPRI)
2505 poll_wait(file, &fh->wait, wait);
2506 }
Pawel Osciake23ccc02010-10-11 10:56:41 -03002507
Hans Verkuilcd138232013-01-30 13:29:02 -03002508 if (!V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLIN | POLLRDNORM)))
2509 return res;
2510 if (V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLOUT | POLLWRNORM)))
2511 return res;
2512
Pawel Osciake23ccc02010-10-11 10:56:41 -03002513 /*
Pawel Osciak4ffabdb2011-03-20 18:17:34 -03002514 * Start file I/O emulator only if streaming API has not been used yet.
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002515 */
Hans Verkuil74753cffa2014-04-07 09:23:50 -03002516 if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
Hans Verkuilbf5c7cb2011-07-13 04:01:30 -03002517 if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
2518 (req_events & (POLLIN | POLLRDNORM))) {
Hans Verkuil95213ce2011-07-13 04:26:52 -03002519 if (__vb2_init_fileio(q, 1))
2520 return res | POLLERR;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002521 }
Hans Verkuilbf5c7cb2011-07-13 04:01:30 -03002522 if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
2523 (req_events & (POLLOUT | POLLWRNORM))) {
Hans Verkuil95213ce2011-07-13 04:26:52 -03002524 if (__vb2_init_fileio(q, 0))
2525 return res | POLLERR;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002526 /*
2527 * Write to OUTPUT queue can be done immediately.
2528 */
Hans Verkuil95213ce2011-07-13 04:26:52 -03002529 return res | POLLOUT | POLLWRNORM;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002530 }
2531 }
2532
2533 /*
Pawel Osciake23ccc02010-10-11 10:56:41 -03002534 * There is nothing to wait for if no buffers have already been queued.
2535 */
2536 if (list_empty(&q->queued_list))
Hans Verkuil95213ce2011-07-13 04:26:52 -03002537 return res | POLLERR;
Pawel Osciake23ccc02010-10-11 10:56:41 -03002538
Seung-Woo Kim412cb872013-05-20 23:47:29 -03002539 if (list_empty(&q->done_list))
2540 poll_wait(file, &q->done_wq, wait);
Pawel Osciake23ccc02010-10-11 10:56:41 -03002541
2542 /*
2543 * Take first buffer available for dequeuing.
2544 */
2545 spin_lock_irqsave(&q->done_lock, flags);
2546 if (!list_empty(&q->done_list))
2547 vb = list_first_entry(&q->done_list, struct vb2_buffer,
2548 done_entry);
2549 spin_unlock_irqrestore(&q->done_lock, flags);
2550
2551 if (vb && (vb->state == VB2_BUF_STATE_DONE
2552 || vb->state == VB2_BUF_STATE_ERROR)) {
Hans Verkuil95213ce2011-07-13 04:26:52 -03002553 return (V4L2_TYPE_IS_OUTPUT(q->type)) ?
2554 res | POLLOUT | POLLWRNORM :
2555 res | POLLIN | POLLRDNORM;
Pawel Osciake23ccc02010-10-11 10:56:41 -03002556 }
Hans Verkuil95213ce2011-07-13 04:26:52 -03002557 return res;
Pawel Osciake23ccc02010-10-11 10:56:41 -03002558}
2559EXPORT_SYMBOL_GPL(vb2_poll);
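
/*
 * Illustrative sketch, not part of the original file: vb2_poll() does not
 * serialize against other queue operations by itself, so a driver's poll
 * file operation typically wraps it with the same lock used for the ioctls.
 * The "foo" names are assumptions for the example.
 *
 *	static unsigned int foo_poll(struct file *file, poll_table *wait)
 *	{
 *		struct foo_dev *dev = video_drvdata(file);
 *		unsigned int res;
 *
 *		mutex_lock(&dev->lock);
 *		res = vb2_poll(&dev->queue, file, wait);
 *		mutex_unlock(&dev->lock);
 *		return res;
 *	}
 */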
2560
2561/**
2562 * vb2_queue_init() - initialize a videobuf2 queue
2563 * @q: videobuf2 queue; this structure should be allocated in driver
2564 *
2565 * The vb2_queue structure should be allocated by the driver. The driver is
2566 * responsible for clearing its content and setting initial values for some
2567 * required entries before calling this function.
2568 * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
2569 * to the struct vb2_queue description in include/media/videobuf2-core.h
2570 * for more information.
2571 */
2572int vb2_queue_init(struct vb2_queue *q)
2573{
Ezequiel Garcia896f38f2012-09-17 14:59:30 -03002574 /*
2575 * Sanity check
2576 */
2577 if (WARN_ON(!q) ||
2578 WARN_ON(!q->ops) ||
2579 WARN_ON(!q->mem_ops) ||
2580 WARN_ON(!q->type) ||
2581 WARN_ON(!q->io_modes) ||
2582 WARN_ON(!q->ops->queue_setup) ||
Kamil Debski6aa69f92013-01-25 06:29:57 -03002583 WARN_ON(!q->ops->buf_queue) ||
Sakari Ailus872484c2013-08-25 17:57:03 -03002584 WARN_ON(q->timestamp_flags &
2585 ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
2586 V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
Ezequiel Garcia896f38f2012-09-17 14:59:30 -03002587 return -EINVAL;
Pawel Osciake23ccc02010-10-11 10:56:41 -03002588
Kamil Debski6aa69f92013-01-25 06:29:57 -03002589 /* Warn that the driver should choose an appropriate timestamp type */
Sakari Ailusc57ff792014-03-01 10:28:02 -03002590 WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
2591 V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);
Kamil Debski6aa69f92013-01-25 06:29:57 -03002592
Pawel Osciake23ccc02010-10-11 10:56:41 -03002593 INIT_LIST_HEAD(&q->queued_list);
2594 INIT_LIST_HEAD(&q->done_list);
2595 spin_lock_init(&q->done_lock);
2596 init_waitqueue_head(&q->done_wq);
2597
2598 if (q->buf_struct_size == 0)
2599 q->buf_struct_size = sizeof(struct vb2_buffer);
2600
2601 return 0;
2602}
2603EXPORT_SYMBOL_GPL(vb2_queue_init);
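
/*
 * Illustrative sketch, not part of the original file: typical queue setup in
 * a hypothetical driver's probe or open path before calling vb2_queue_init().
 * The "foo" names and the choice of vb2_vmalloc_memops are assumptions made
 * for the example; <media/videobuf2-vmalloc.h> would need to be included for
 * that allocator.
 *
 *	static int foo_init_queue(struct foo_dev *dev)
 *	{
 *		struct vb2_queue *q = &dev->queue;
 *
 *		memset(q, 0, sizeof(*q));
 *		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
 *		q->drv_priv = dev;
 *		q->buf_struct_size = sizeof(struct foo_buffer);
 *		q->ops = &foo_vb2_ops;
 *		q->mem_ops = &vb2_vmalloc_memops;
 *		q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *
 *		return vb2_queue_init(q);
 *	}
 */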
2604
2605/**
2606 * vb2_queue_release() - stop streaming, release the queue and free memory
2607 * @q: videobuf2 queue
2608 *
2609 * This function stops streaming and performs necessary clean ups, including
2610 * freeing video buffer memory. The driver is responsible for freeing
2611 * the vb2_queue structure itself.
2612 */
2613void vb2_queue_release(struct vb2_queue *q)
2614{
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002615 __vb2_cleanup_fileio(q);
Pawel Osciake23ccc02010-10-11 10:56:41 -03002616 __vb2_queue_cancel(q);
Guennadi Liakhovetski2d864012011-09-28 09:23:02 -03002617 __vb2_queue_free(q, q->num_buffers);
Pawel Osciake23ccc02010-10-11 10:56:41 -03002618}
2619EXPORT_SYMBOL_GPL(vb2_queue_release);
2620
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002621/**
2622 * struct vb2_fileio_buf - buffer context used by file io emulator
2623 *
2624 * vb2 provides a compatibility layer and emulator of file io (read and
2625 * write) calls on top of the streaming API. This structure is used for
2626 * tracking context related to the buffers.
2627 */
2628struct vb2_fileio_buf {
2629 void *vaddr;
2630 unsigned int size;
2631 unsigned int pos;
2632 unsigned int queued:1;
2633};
2634
2635/**
2636 * struct vb2_fileio_data - queue context used by file io emulator
2637 *
Hans Verkuil4e5a4d82014-02-14 06:46:50 -03002638 * @cur_index: the index of the buffer currently being read from or
2639 * written to. If equal to q->num_buffers then a new buffer
2640 * must be dequeued.
2641 * @initial_index: in the read() case all buffers are queued up immediately
2642 * in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
2643 * buffers. However, in the write() case no buffers are initially
2644 * queued, instead whenever a buffer is full it is queued up by
2645 * __vb2_perform_fileio(). Only once all available buffers have
2646 * been queued up will __vb2_perform_fileio() start to dequeue
2647 * buffers. This means that initially __vb2_perform_fileio()
2648 * needs to know what buffer index to use when it is queuing up
2649 * the buffers for the first time. That initial index is stored
2650 * in this field. Once it is equal to q->num_buffers all
2651 * available buffers have been queued and __vb2_perform_fileio()
2652 * should start the normal dequeue/queue cycle.
2653 *
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002654 * vb2 provides a compatibility layer and emulator of file io (read and
2655 * write) calls on top of the streaming API. For proper operation it requires
2656 * this structure to save the driver state between each call of the read
2657 * or write function.
2658 */
2659struct vb2_fileio_data {
2660 struct v4l2_requestbuffers req;
2661 struct v4l2_buffer b;
2662 struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];
Hans Verkuil4e5a4d82014-02-14 06:46:50 -03002663 unsigned int cur_index;
2664 unsigned int initial_index;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002665 unsigned int q_count;
2666 unsigned int dq_count;
2667 unsigned int flags;
2668};
2669
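/*
 * Hedged illustration of the index bookkeeping above, assuming
 * q->num_buffers == 3.  In the read() case __vb2_init_fileio() queues
 * buffers 0..2 and sets initial_index = cur_index = 3, so the first
 * __vb2_perform_fileio() call starts by dequeuing a buffer.  In the
 * write() case nothing is queued up front (initial_index = cur_index = 0);
 * each time a buffer fills up (or VB2_FILEIO_WRITE_IMMEDIATELY is set)
 * it is queued and initial_index advances, and only after buffers 0..2
 * have all been queued does the normal dequeue/queue cycle begin.
 */
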
2670/**
2671 * __vb2_init_fileio() - initialize file io emulator
2672 * @q: videobuf2 queue
2673 * @read: mode selector (1 means read, 0 means write)
2674 */
2675static int __vb2_init_fileio(struct vb2_queue *q, int read)
2676{
2677 struct vb2_fileio_data *fileio;
2678 int i, ret;
2679 unsigned int count = 0;
2680
2681 /*
2682 * Sanity check
2683 */
Hans Verkuile4d25812014-02-03 11:22:45 -03002684 if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
2685 (!read && !(q->io_modes & VB2_WRITE))))
2686 return -EINVAL;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002687
2688 /*
2689 * Check if device supports mapping buffers to kernel virtual space.
2690 */
2691 if (!q->mem_ops->vaddr)
2692 return -EBUSY;
2693
2694 /*
2695 * Check that the streaming API has not already been activated.
2696 */
2697 if (q->streaming || q->num_buffers > 0)
2698 return -EBUSY;
2699
2700 /*
2701 * Start with count 1, driver can increase it in queue_setup()
2702 */
2703 count = 1;
2704
2705 dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
2706 (read) ? "read" : "write", count, q->io_flags);
2707
2708 fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
2709 if (fileio == NULL)
2710 return -ENOMEM;
2711
2712 fileio->flags = q->io_flags;
2713
2714 /*
2715 * Request buffers and use MMAP type to force the driver
2716 * to allocate buffers by itself.
2717 */
2718 fileio->req.count = count;
2719 fileio->req.memory = V4L2_MEMORY_MMAP;
2720 fileio->req.type = q->type;
Hans Verkuil74753cffa2014-04-07 09:23:50 -03002721 q->fileio = fileio;
2722 ret = __reqbufs(q, &fileio->req);
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002723 if (ret)
2724 goto err_kfree;
2725
2726 /*
2727 * Check that the number of planes is correct
2728 * (multiplane buffers are not supported).
2729 */
2730 if (q->bufs[0]->num_planes != 1) {
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002731 ret = -EBUSY;
2732 goto err_reqbufs;
2733 }
2734
2735 /*
2736 * Get kernel address of each buffer.
2737 */
2738 for (i = 0; i < q->num_buffers; i++) {
2739 fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
Wei Yongjun5dd69462013-05-13 01:48:45 -03002740 if (fileio->bufs[i].vaddr == NULL) {
2741 ret = -EINVAL;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002742 goto err_reqbufs;
Wei Yongjun5dd69462013-05-13 01:48:45 -03002743 }
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002744 fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
2745 }
2746
2747 /*
2748 * Read mode requires pre queuing of all buffers.
2749 */
2750 if (read) {
2751 /*
2752 * Queue all buffers.
2753 */
2754 for (i = 0; i < q->num_buffers; i++) {
2755 struct v4l2_buffer *b = &fileio->b;
2756 memset(b, 0, sizeof(*b));
2757 b->type = q->type;
2758 b->memory = q->memory;
2759 b->index = i;
Hans Verkuil74753cffa2014-04-07 09:23:50 -03002760 ret = vb2_internal_qbuf(q, b);
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002761 if (ret)
2762 goto err_reqbufs;
2763 fileio->bufs[i].queued = 1;
2764 }
Hans Verkuil4e5a4d82014-02-14 06:46:50 -03002765 /*
2766 * All buffers have been queued, so mark that by setting
2767 * initial_index to q->num_buffers
2768 */
2769 fileio->initial_index = q->num_buffers;
2770 fileio->cur_index = q->num_buffers;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002771 }
2772
Hans Verkuil02f142e2013-12-13 13:13:42 -03002773 /*
2774 * Start streaming.
2775 */
Hans Verkuil74753cffa2014-04-07 09:23:50 -03002776 ret = vb2_internal_streamon(q, q->type);
Hans Verkuil02f142e2013-12-13 13:13:42 -03002777 if (ret)
2778 goto err_reqbufs;
2779
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002780 return ret;
2781
2782err_reqbufs:
Hans de Goedea67e1722012-05-08 14:47:39 -03002783 fileio->req.count = 0;
Hans Verkuil74753cffa2014-04-07 09:23:50 -03002784 __reqbufs(q, &fileio->req);
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002785
2786err_kfree:
Hans Verkuil74753cffa2014-04-07 09:23:50 -03002787 q->fileio = NULL;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002788 kfree(fileio);
2789 return ret;
2790}
2791
2792/**
2793 * __vb2_cleanup_fileio() - free resources used by the file io emulator
2794 * @q: videobuf2 queue
2795 */
2796static int __vb2_cleanup_fileio(struct vb2_queue *q)
2797{
2798 struct vb2_fileio_data *fileio = q->fileio;
2799
2800 if (fileio) {
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002801 vb2_internal_streamoff(q, q->type);
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002802 q->fileio = NULL;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002803 fileio->req.count = 0;
2804 vb2_reqbufs(q, &fileio->req);
2805 kfree(fileio);
2806 dprintk(3, "file io emulator closed\n");
2807 }
2808 return 0;
2809}
2810
2811/**
2812 * __vb2_perform_fileio() - perform a single file io (read or write) operation
2813 * @q: videobuf2 queue
2814 * @data: pointer to the target userspace buffer
2815 * @count: number of bytes to read or write
2816 * @ppos: file handle position tracking pointer
2817 * @nonblock: mode selector (1 means nonblocking calls, 0 means blocking)
2818 * @read: access mode selector (1 means read, 0 means write)
2819 */
2820static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
2821 loff_t *ppos, int nonblock, int read)
2822{
2823 struct vb2_fileio_data *fileio;
2824 struct vb2_fileio_buf *buf;
Hans Verkuilebd7c502014-04-11 04:36:57 -03002825 /*
2826 * When using write() to write data to an output video node the vb2 core
2827 * should set timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
2828 * else is able to provide this information with the write() operation.
2829 */
2830 bool set_timestamp = !read &&
2831 (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
2832 V4L2_BUF_FLAG_TIMESTAMP_COPY;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002833 int ret, index;
2834
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002835 dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002836 read ? "read" : "write", (long)*ppos, count,
2837 nonblock ? "non" : "");
2838
2839 if (!data)
2840 return -EINVAL;
2841
2842 /*
2843 * Initialize emulator on first call.
2844 */
Hans Verkuil74753cffa2014-04-07 09:23:50 -03002845 if (!vb2_fileio_is_active(q)) {
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002846 ret = __vb2_init_fileio(q, read);
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002847 dprintk(3, "vb2_init_fileio result: %d\n", ret);
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002848 if (ret)
2849 return ret;
2850 }
2851 fileio = q->fileio;
2852
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002853 /*
2854 * Check if we need to dequeue the buffer.
2855 */
Hans Verkuil4e5a4d82014-02-14 06:46:50 -03002856 index = fileio->cur_index;
Hans Verkuil88e26872013-12-13 13:13:45 -03002857 if (index >= q->num_buffers) {
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002858 /*
2859 * Call vb2_dqbuf to get buffer back.
2860 */
2861 memset(&fileio->b, 0, sizeof(fileio->b));
2862 fileio->b.type = q->type;
2863 fileio->b.memory = q->memory;
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002864 ret = vb2_internal_dqbuf(q, &fileio->b, nonblock);
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002865 dprintk(5, "vb2_dqbuf result: %d\n", ret);
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002866 if (ret)
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002867 return ret;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002868 fileio->dq_count += 1;
2869
Hans Verkuil4e5a4d82014-02-14 06:46:50 -03002870 fileio->cur_index = index = fileio->b.index;
Hans Verkuil88e26872013-12-13 13:13:45 -03002871 buf = &fileio->bufs[index];
2872
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002873 /*
2874 * Get number of bytes filled by the driver
2875 */
Hans Verkuil88e26872013-12-13 13:13:45 -03002876 buf->pos = 0;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002877 buf->queued = 0;
Hans Verkuil88e26872013-12-13 13:13:45 -03002878 buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
2879 : vb2_plane_size(q->bufs[index], 0);
2880 } else {
2881 buf = &fileio->bufs[index];
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002882 }
2883
2884 /*
2885 * Limit count to the remaining bytes in the buffer.
2886 */
2887 if (buf->pos + count > buf->size) {
2888 count = buf->size - buf->pos;
Mauro Carvalho Chehab08b99e22011-01-11 17:12:34 -03002889 dprintk(5, "reducing read count: %zd\n", count);
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002890 }
2891
2892 /*
2893 * Transfer data to userspace.
2894 */
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002895 dprintk(3, "copying %zd bytes - buffer %d, offset %u\n",
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002896 count, index, buf->pos);
2897 if (read)
2898 ret = copy_to_user(data, buf->vaddr + buf->pos, count);
2899 else
2900 ret = copy_from_user(buf->vaddr + buf->pos, data, count);
2901 if (ret) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002902 dprintk(3, "error copying data\n");
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002903 return -EFAULT;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002904 }
2905
2906 /*
2907 * Update counters.
2908 */
2909 buf->pos += count;
2910 *ppos += count;
2911
2912 /*
2913 * Queue next buffer if required.
2914 */
2915 if (buf->pos == buf->size ||
2916 (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) {
2917 /*
2918 * Check if this is the last buffer to read.
2919 */
2920 if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
2921 fileio->dq_count == 1) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002922 dprintk(3, "read limit reached\n");
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002923 return __vb2_cleanup_fileio(q);
2924 }
2925
2926 /*
2927 * Call vb2_qbuf and give buffer to the driver.
2928 */
2929 memset(&fileio->b, 0, sizeof(fileio->b));
2930 fileio->b.type = q->type;
2931 fileio->b.memory = q->memory;
2932 fileio->b.index = index;
2933 fileio->b.bytesused = buf->pos;
Hans Verkuilebd7c502014-04-11 04:36:57 -03002934 if (set_timestamp)
2935 v4l2_get_timestamp(&fileio->b.timestamp);
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002936 ret = vb2_internal_qbuf(q, &fileio->b);
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002937 dprintk(5, "vb2_qbuf result: %d\n", ret);
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002938 if (ret)
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002939 return ret;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002940
2941 /*
2942 * Buffer has been queued, update the status
2943 */
2944 buf->pos = 0;
2945 buf->queued = 1;
Hans Verkuil88e26872013-12-13 13:13:45 -03002946 buf->size = vb2_plane_size(q->bufs[index], 0);
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002947 fileio->q_count += 1;
Hans Verkuil4e5a4d82014-02-14 06:46:50 -03002948 /*
2949 * If we are queuing up buffers for the first time, then
2950 * increase initial_index by one.
2951 */
2952 if (fileio->initial_index < q->num_buffers)
2953 fileio->initial_index++;
2954 /*
2955 * The next buffer to use is either a buffer that's going to be
2956 * queued for the first time (initial_index < q->num_buffers)
2957 * or it is equal to q->num_buffers, meaning that the next
2958 * time we need to dequeue a buffer since we've now queued up
2959 * all the 'first time' buffers.
2960 */
2961 fileio->cur_index = fileio->initial_index;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002962 }
2963
2964 /*
2965 * Return proper number of bytes processed.
2966 */
2967 if (ret == 0)
2968 ret = count;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002969 return ret;
2970}
2971
2972size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
2973 loff_t *ppos, int nonblocking)
2974{
2975 return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
2976}
2977EXPORT_SYMBOL_GPL(vb2_read);
2978
Ricardo Ribalda819585b2013-08-28 04:39:29 -03002979size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002980 loff_t *ppos, int nonblocking)
2981{
Ricardo Ribalda819585b2013-08-28 04:39:29 -03002982 return __vb2_perform_fileio(q, (char __user *) data, count,
2983 ppos, nonblocking, 0);
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002984}
2985EXPORT_SYMBOL_GPL(vb2_write);
2986
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03002987
2988/*
2989 * The following functions are not part of the vb2 core API, but are helper
2990 * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
2991 * and struct vb2_ops.
2992 * They contain boilerplate code that most if not all drivers have to do
2993 * and so they simplify the driver code.
2994 */
2995
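/*
 * Hedged example of how a driver typically plugs these helpers into its
 * v4l2_ioctl_ops and v4l2_file_operations tables (struct v4l2_ioctl_ops
 * comes from media/v4l2-ioctl.h).  The "my_*" names are illustrative
 * placeholders, not symbols provided by vb2, so the sketch is kept
 * under #if 0.
 */
#if 0
static const struct v4l2_ioctl_ops my_ioctl_ops = {
	.vidioc_reqbufs		= vb2_ioctl_reqbufs,
	.vidioc_create_bufs	= vb2_ioctl_create_bufs,
	.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
	.vidioc_querybuf	= vb2_ioctl_querybuf,
	.vidioc_qbuf		= vb2_ioctl_qbuf,
	.vidioc_dqbuf		= vb2_ioctl_dqbuf,
	.vidioc_expbuf		= vb2_ioctl_expbuf,
	.vidioc_streamon	= vb2_ioctl_streamon,
	.vidioc_streamoff	= vb2_ioctl_streamoff,
};

static const struct v4l2_file_operations my_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.read		= vb2_fop_read,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
	.unlocked_ioctl	= video_ioctl2,
};
#endif
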
2996 /* The queue is busy if there is an owner and you are not that owner. */
2997static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
2998{
2999 return vdev->queue->owner && vdev->queue->owner != file->private_data;
3000}
3001
3002/* vb2 ioctl helpers */
3003
3004int vb2_ioctl_reqbufs(struct file *file, void *priv,
3005 struct v4l2_requestbuffers *p)
3006{
3007 struct video_device *vdev = video_devdata(file);
3008 int res = __verify_memory_type(vdev->queue, p->memory, p->type);
3009
3010 if (res)
3011 return res;
3012 if (vb2_queue_is_busy(vdev, file))
3013 return -EBUSY;
3014 res = __reqbufs(vdev->queue, p);
3015 /* If count == 0, then the owner has released all buffers and he
3016 is no longer the owner of the queue. Otherwise we have a new owner. */
3017 if (res == 0)
3018 vdev->queue->owner = p->count ? file->private_data : NULL;
3019 return res;
3020}
3021EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
3022
3023int vb2_ioctl_create_bufs(struct file *file, void *priv,
3024 struct v4l2_create_buffers *p)
3025{
3026 struct video_device *vdev = video_devdata(file);
3027 int res = __verify_memory_type(vdev->queue, p->memory, p->format.type);
3028
3029 p->index = vdev->queue->num_buffers;
3030 /* If count == 0, then just check if memory and type are valid.
3031 Any -EBUSY result from __verify_memory_type can be mapped to 0. */
3032 if (p->count == 0)
3033 return res != -EBUSY ? res : 0;
3034 if (res)
3035 return res;
3036 if (vb2_queue_is_busy(vdev, file))
3037 return -EBUSY;
3038 res = __create_bufs(vdev->queue, p);
3039 if (res == 0)
3040 vdev->queue->owner = file->private_data;
3041 return res;
3042}
3043EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
3044
3045int vb2_ioctl_prepare_buf(struct file *file, void *priv,
3046 struct v4l2_buffer *p)
3047{
3048 struct video_device *vdev = video_devdata(file);
3049
3050 if (vb2_queue_is_busy(vdev, file))
3051 return -EBUSY;
3052 return vb2_prepare_buf(vdev->queue, p);
3053}
3054EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
3055
3056int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
3057{
3058 struct video_device *vdev = video_devdata(file);
3059
3060 /* No need to call vb2_queue_is_busy(), anyone can query buffers. */
3061 return vb2_querybuf(vdev->queue, p);
3062}
3063EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);
3064
3065int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
3066{
3067 struct video_device *vdev = video_devdata(file);
3068
3069 if (vb2_queue_is_busy(vdev, file))
3070 return -EBUSY;
3071 return vb2_qbuf(vdev->queue, p);
3072}
3073EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
3074
3075int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
3076{
3077 struct video_device *vdev = video_devdata(file);
3078
3079 if (vb2_queue_is_busy(vdev, file))
3080 return -EBUSY;
3081 return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
3082}
3083EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);
3084
3085int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
3086{
3087 struct video_device *vdev = video_devdata(file);
3088
3089 if (vb2_queue_is_busy(vdev, file))
3090 return -EBUSY;
3091 return vb2_streamon(vdev->queue, i);
3092}
3093EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);
3094
3095int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
3096{
3097 struct video_device *vdev = video_devdata(file);
3098
3099 if (vb2_queue_is_busy(vdev, file))
3100 return -EBUSY;
3101 return vb2_streamoff(vdev->queue, i);
3102}
3103EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
3104
Tomasz Stanislawski83ae7c52012-06-14 11:32:24 -03003105int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
3106{
3107 struct video_device *vdev = video_devdata(file);
3108
3109 if (vb2_queue_is_busy(vdev, file))
3110 return -EBUSY;
3111 return vb2_expbuf(vdev->queue, p);
3112}
3113EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
3114
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003115/* v4l2_file_operations helpers */
3116
3117int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
3118{
3119 struct video_device *vdev = video_devdata(file);
Laurent Pinchart8a90f1a2013-08-02 13:55:21 -03003120 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3121 int err;
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003122
Laurent Pinchart8a90f1a2013-08-02 13:55:21 -03003123 if (lock && mutex_lock_interruptible(lock))
3124 return -ERESTARTSYS;
3125 err = vb2_mmap(vdev->queue, vma);
3126 if (lock)
3127 mutex_unlock(lock);
3128 return err;
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003129}
3130EXPORT_SYMBOL_GPL(vb2_fop_mmap);
3131
Ricardo Ribalda1380f572013-11-25 05:49:02 -03003132int _vb2_fop_release(struct file *file, struct mutex *lock)
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003133{
3134 struct video_device *vdev = video_devdata(file);
3135
3136 if (file->private_data == vdev->queue->owner) {
Ricardo Ribalda1380f572013-11-25 05:49:02 -03003137 if (lock)
3138 mutex_lock(lock);
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003139 vb2_queue_release(vdev->queue);
3140 vdev->queue->owner = NULL;
Ricardo Ribalda1380f572013-11-25 05:49:02 -03003141 if (lock)
3142 mutex_unlock(lock);
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003143 }
3144 return v4l2_fh_release(file);
3145}
Ricardo Ribalda1380f572013-11-25 05:49:02 -03003146EXPORT_SYMBOL_GPL(_vb2_fop_release);
3147
3148int vb2_fop_release(struct file *file)
3149{
3150 struct video_device *vdev = video_devdata(file);
3151 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3152
3153 return _vb2_fop_release(file, lock);
3154}
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003155EXPORT_SYMBOL_GPL(vb2_fop_release);
3156
Ricardo Ribalda819585b2013-08-28 04:39:29 -03003157ssize_t vb2_fop_write(struct file *file, const char __user *buf,
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003158 size_t count, loff_t *ppos)
3159{
3160 struct video_device *vdev = video_devdata(file);
3161 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003162 int err = -EBUSY;
3163
Hans Verkuilcf533732012-07-31 04:02:25 -03003164 if (lock && mutex_lock_interruptible(lock))
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003165 return -ERESTARTSYS;
3166 if (vb2_queue_is_busy(vdev, file))
3167 goto exit;
3168 err = vb2_write(vdev->queue, buf, count, ppos,
3169 file->f_flags & O_NONBLOCK);
Hans Verkuil8c82c752012-09-07 12:50:02 -03003170 if (vdev->queue->fileio)
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003171 vdev->queue->owner = file->private_data;
3172exit:
Hans Verkuilcf533732012-07-31 04:02:25 -03003173 if (lock)
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003174 mutex_unlock(lock);
3175 return err;
3176}
3177EXPORT_SYMBOL_GPL(vb2_fop_write);
3178
3179ssize_t vb2_fop_read(struct file *file, char __user *buf,
3180 size_t count, loff_t *ppos)
3181{
3182 struct video_device *vdev = video_devdata(file);
3183 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003184 int err = -EBUSY;
3185
Hans Verkuilcf533732012-07-31 04:02:25 -03003186 if (lock && mutex_lock_interruptible(lock))
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003187 return -ERESTARTSYS;
3188 if (vb2_queue_is_busy(vdev, file))
3189 goto exit;
3190 err = vb2_read(vdev->queue, buf, count, ppos,
3191 file->f_flags & O_NONBLOCK);
Hans Verkuil8c82c752012-09-07 12:50:02 -03003192 if (vdev->queue->fileio)
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003193 vdev->queue->owner = file->private_data;
3194exit:
Hans Verkuilcf533732012-07-31 04:02:25 -03003195 if (lock)
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003196 mutex_unlock(lock);
3197 return err;
3198}
3199EXPORT_SYMBOL_GPL(vb2_fop_read);
3200
3201unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
3202{
3203 struct video_device *vdev = video_devdata(file);
3204 struct vb2_queue *q = vdev->queue;
3205 struct mutex *lock = q->lock ? q->lock : vdev->lock;
3206 unsigned long req_events = poll_requested_events(wait);
3207 unsigned res;
3208 void *fileio;
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003209 bool must_lock = false;
3210
3211 /* Try to be smart: only lock if polling might start fileio,
3212 otherwise locking will only introduce unwanted delays. */
Hans Verkuil74753cffa2014-04-07 09:23:50 -03003213 if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003214 if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
3215 (req_events & (POLLIN | POLLRDNORM)))
3216 must_lock = true;
3217 else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
3218 (req_events & (POLLOUT | POLLWRNORM)))
3219 must_lock = true;
3220 }
3221
3222 /* If locking is needed, but this helper doesn't know how, then you
3223 shouldn't be using this helper but you should write your own. */
Hans Verkuilcf533732012-07-31 04:02:25 -03003224 WARN_ON(must_lock && !lock);
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003225
Hans Verkuilcf533732012-07-31 04:02:25 -03003226 if (must_lock && lock && mutex_lock_interruptible(lock))
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003227 return POLLERR;
3228
3229 fileio = q->fileio;
3230
3231 res = vb2_poll(vdev->queue, file, wait);
3232
3233 /* If fileio was started, then we have a new queue owner. */
3234 if (must_lock && !fileio && q->fileio)
3235 q->owner = file->private_data;
Hans Verkuilcf533732012-07-31 04:02:25 -03003236 if (must_lock && lock)
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003237 mutex_unlock(lock);
3238 return res;
3239}
3240EXPORT_SYMBOL_GPL(vb2_fop_poll);
3241
3242#ifndef CONFIG_MMU
3243unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
3244 unsigned long len, unsigned long pgoff, unsigned long flags)
3245{
3246 struct video_device *vdev = video_devdata(file);
Laurent Pinchart8a90f1a2013-08-02 13:55:21 -03003247 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3248 int ret;
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003249
Laurent Pinchart8a90f1a2013-08-02 13:55:21 -03003250 if (lock && mutex_lock_interruptible(lock))
3251 return -ERESTARTSYS;
3252 ret = vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
3253 if (lock)
3254 mutex_unlock(lock);
3255 return ret;
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003256}
3257EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
3258#endif
3259
3260/* vb2_ops helpers. Only use if vq->lock is non-NULL. */
3261
3262void vb2_ops_wait_prepare(struct vb2_queue *vq)
3263{
3264 mutex_unlock(vq->lock);
3265}
3266EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
3267
3268void vb2_ops_wait_finish(struct vb2_queue *vq)
3269{
3270 mutex_lock(vq->lock);
3271}
3272EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
3273
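/*
 * Hedged sketch of a vb2_ops table that uses the two wait helpers above;
 * only valid when the driver has set vq->lock, as noted in the comment
 * before them.  The "my_*" callbacks are illustrative placeholders, so
 * the sketch is kept under #if 0.
 */
#if 0
static const struct vb2_ops my_vb2_ops = {
	.queue_setup	= my_queue_setup,
	.buf_queue	= my_buf_queue,
	.wait_prepare	= vb2_ops_wait_prepare,
	.wait_finish	= vb2_ops_wait_finish,
};
#endif
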
Pawel Osciake23ccc02010-10-11 10:56:41 -03003274MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
Pawel Osciak95072082011-03-13 15:23:32 -03003275MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
Pawel Osciake23ccc02010-10-11 10:56:41 -03003276MODULE_LICENSE("GPL");