/*
 * videobuf2-core.c - V4L2 driver helper framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-common.h>
#include <media/videobuf2-core.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			pr_debug("vb2: %s: " fmt, __func__, ## arg);	\
	} while (0)

#ifdef CONFIG_VIDEO_ADV_DEBUG

/*
 * If advanced debugging is on, then count how often each op is called
 * successfully, which can either be per-buffer or per-queue.
 *
 * This makes it easy to check that the 'init' and 'cleanup'
 * (and variations thereof) stay balanced.
 */

#define log_memop(vb, op)						\
	dprintk(2, "call_memop(%p, %d, %s)%s\n",			\
		(vb)->vb2_queue, (vb)->v4l2_buf.index, #op,		\
		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)")

#define call_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	int err;							\
									\
	log_memop(vb, op);						\
	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0;		\
	if (!err)							\
		(vb)->cnt_mem_ ## op++;					\
	err;								\
})

#define call_ptr_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	void *ptr;							\
									\
	log_memop(vb, op);						\
	ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL;		\
	if (!IS_ERR_OR_NULL(ptr))					\
		(vb)->cnt_mem_ ## op++;					\
	ptr;								\
})

#define call_void_memop(vb, op, args...)				\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
									\
	log_memop(vb, op);						\
	if (_q->mem_ops->op)						\
		_q->mem_ops->op(args);					\
	(vb)->cnt_mem_ ## op++;						\
})

#define log_qop(q, op)							\
	dprintk(2, "call_qop(%p, %s)%s\n", q, #op,			\
		(q)->ops->op ? "" : " (nop)")

#define call_qop(q, op, args...)					\
({									\
	int err;							\
									\
	log_qop(q, op);							\
	err = (q)->ops->op ? (q)->ops->op(args) : 0;			\
	if (!err)							\
		(q)->cnt_ ## op++;					\
	err;								\
})

#define call_void_qop(q, op, args...)					\
({									\
	log_qop(q, op);							\
	if ((q)->ops->op)						\
		(q)->ops->op(args);					\
	(q)->cnt_ ## op++;						\
})

#define log_vb_qop(vb, op, args...)					\
	dprintk(2, "call_vb_qop(%p, %d, %s)%s\n",			\
		(vb)->vb2_queue, (vb)->v4l2_buf.index, #op,		\
		(vb)->vb2_queue->ops->op ? "" : " (nop)")

#define call_vb_qop(vb, op, args...)					\
({									\
	int err;							\
									\
	log_vb_qop(vb, op);						\
	err = (vb)->vb2_queue->ops->op ?				\
		(vb)->vb2_queue->ops->op(args) : 0;			\
	if (!err)							\
		(vb)->cnt_ ## op++;					\
	err;								\
})

#define call_void_vb_qop(vb, op, args...)				\
({									\
	log_vb_qop(vb, op);						\
	if ((vb)->vb2_queue->ops->op)					\
		(vb)->vb2_queue->ops->op(args);				\
	(vb)->cnt_ ## op++;						\
})

#else

#define call_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : 0)

#define call_ptr_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : NULL)

#define call_void_memop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->mem_ops->op)			\
			(vb)->vb2_queue->mem_ops->op(args);		\
	} while (0)

#define call_qop(q, op, args...)					\
	((q)->ops->op ? (q)->ops->op(args) : 0)

#define call_void_qop(q, op, args...)					\
	do {								\
		if ((q)->ops->op)					\
			(q)->ops->op(args);				\
	} while (0)

#define call_vb_qop(vb, op, args...)					\
	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)

#define call_void_vb_qop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->ops->op)				\
			(vb)->vb2_queue->ops->op(args);			\
	} while (0)

#endif

/* Flags that are set by the vb2 core */
#define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
				 V4L2_BUF_FLAG_PREPARED | \
				 V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver */
#define V4L2_BUFFER_OUT_FLAGS	(V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
				 V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)

static void __vb2_queue_cancel(struct vb2_queue *q);

/**
 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
 */
static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	int plane;

	/*
	 * Allocate memory for all planes in this buffer
	 * NOTE: mmapped areas should be page aligned
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);

		mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane],
				      size, q->gfp_flags);
		if (IS_ERR_OR_NULL(mem_priv))
			goto free;

		/* Associate allocator private data with this plane */
		vb->planes[plane].mem_priv = mem_priv;
		vb->v4l2_planes[plane].length = q->plane_sizes[plane];
	}

	return 0;
free:
	/* Free already allocated memory if one of the allocations failed */
	for (; plane > 0; --plane) {
		call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
		vb->planes[plane - 1].mem_priv = NULL;
	}

	return -ENOMEM;
}

/**
 * __vb2_buf_mem_free() - free memory of the given buffer
 */
static void __vb2_buf_mem_free(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		call_void_memop(vb, put, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		dprintk(3, "Freed plane %d of buffer %d\n", plane,
			vb->v4l2_buf.index);
	}
}

/**
 * __vb2_buf_userptr_put() - release userspace memory associated with
 * a USERPTR buffer
 */
static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
	}
}

/**
 * __vb2_plane_dmabuf_put() - release memory associated with
 * a DMABUF shared plane
 */
static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
{
	if (!p->mem_priv)
		return;

	if (p->dbuf_mapped)
		call_void_memop(vb, unmap_dmabuf, p->mem_priv);

	call_void_memop(vb, detach_dmabuf, p->mem_priv);
	dma_buf_put(p->dbuf);
	memset(p, 0, sizeof(*p));
}

/**
 * __vb2_buf_dmabuf_put() - release memory associated with
 * a DMABUF shared buffer
 */
static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane)
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
}

/**
 * __setup_lengths() - setup initial lengths for every plane in
 * every buffer on the queue
 */
static void __setup_lengths(struct vb2_queue *q, unsigned int n)
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;

	for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		for (plane = 0; plane < vb->num_planes; ++plane)
			vb->v4l2_planes[plane].length = q->plane_sizes[plane];
	}
}

/**
 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
 * every buffer on the queue
 */
static void __setup_offsets(struct vb2_queue *q, unsigned int n)
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;
	unsigned long off;

	if (q->num_buffers) {
		struct v4l2_plane *p;
		vb = q->bufs[q->num_buffers - 1];
		p = &vb->v4l2_planes[vb->num_planes - 1];
		off = PAGE_ALIGN(p->m.mem_offset + p->length);
	} else {
		off = 0;
	}

	for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		for (plane = 0; plane < vb->num_planes; ++plane) {
			vb->v4l2_planes[plane].m.mem_offset = off;

			dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
					buffer, plane, off);

			off += vb->v4l2_planes[plane].length;
			off = PAGE_ALIGN(off);
		}
	}
}

/**
 * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
 * video buffer memory for all buffers/planes on the queue and initializes the
 * queue
 *
 * Returns the number of buffers successfully allocated.
 */
static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
			     unsigned int num_buffers, unsigned int num_planes)
{
	unsigned int buffer;
	struct vb2_buffer *vb;
	int ret;

	for (buffer = 0; buffer < num_buffers; ++buffer) {
		/* Allocate videobuf buffer structures */
		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
		if (!vb) {
			dprintk(1, "Memory alloc for buffer struct failed\n");
			break;
		}

		/* Length stores number of planes for multiplanar buffers */
		if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
			vb->v4l2_buf.length = num_planes;

		vb->state = VB2_BUF_STATE_DEQUEUED;
		vb->vb2_queue = q;
		vb->num_planes = num_planes;
		vb->v4l2_buf.index = q->num_buffers + buffer;
		vb->v4l2_buf.type = q->type;
		vb->v4l2_buf.memory = memory;

		/* Allocate video buffer memory for the MMAP type */
		if (memory == V4L2_MEMORY_MMAP) {
			ret = __vb2_buf_mem_alloc(vb);
			if (ret) {
				dprintk(1, "Failed allocating memory for "
						"buffer %d\n", buffer);
				kfree(vb);
				break;
			}
			/*
			 * Call the driver-provided buffer initialization
			 * callback, if given. An error in initialization
			 * results in queue setup failure.
			 */
			ret = call_vb_qop(vb, buf_init, vb);
			if (ret) {
				dprintk(1, "Buffer %d %p initialization"
					" failed\n", buffer, vb);
				__vb2_buf_mem_free(vb);
				kfree(vb);
				break;
			}
		}

		q->bufs[q->num_buffers + buffer] = vb;
	}

	__setup_lengths(q, buffer);
	if (memory == V4L2_MEMORY_MMAP)
		__setup_offsets(q, buffer);

	dprintk(1, "Allocated %d buffers, %d plane(s) each\n",
			buffer, num_planes);

	return buffer;
}

/**
 * __vb2_free_mem() - release all video buffer memory for a given queue
 */
static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;
	struct vb2_buffer *vb;

	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		/* Free MMAP buffers or release USERPTR buffers */
		if (q->memory == V4L2_MEMORY_MMAP)
			__vb2_buf_mem_free(vb);
		else if (q->memory == V4L2_MEMORY_DMABUF)
			__vb2_buf_dmabuf_put(vb);
		else
			__vb2_buf_userptr_put(vb);
	}
}

/**
 * __vb2_queue_free() - free buffers at the end of the queue - video memory and
 * related information, if no buffers are left return the queue to an
 * uninitialized state. Might be called even if the queue has already been freed.
 */
static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;

	/*
	 * Sanity check: when preparing a buffer the queue lock is released for
	 * a short while (see __buf_prepare for the details), which would allow
	 * a race with a reqbufs which can call this function. Removing the
	 * buffers from underneath __buf_prepare is obviously a bad idea, so we
	 * check if any of the buffers is in the state PREPARING, and if so we
	 * just return -EAGAIN.
	 */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		if (q->bufs[buffer] == NULL)
			continue;
		if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
			dprintk(1, "preparing buffers, cannot free\n");
			return -EAGAIN;
		}
	}

	/* Call driver-provided cleanup function for each buffer, if provided */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];

		if (vb && vb->planes[0].mem_priv)
			call_void_vb_qop(vb, buf_cleanup, vb);
	}

	/* Release video buffer memory */
	__vb2_free_mem(q, buffers);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Check that all the calls were balanced during the lifetime of this
	 * queue. If not (or if the debug level is 1 or up), then dump the
	 * counters to the kernel log.
	 */
	if (q->num_buffers) {
		bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
				  q->cnt_wait_prepare != q->cnt_wait_finish;

		if (unbalanced || debug) {
			pr_info("vb2: counters for queue %p:%s\n", q,
				unbalanced ? " UNBALANCED!" : "");
			pr_info("vb2: setup: %u start_streaming: %u stop_streaming: %u\n",
				q->cnt_queue_setup, q->cnt_start_streaming,
				q->cnt_stop_streaming);
			pr_info("vb2: wait_prepare: %u wait_finish: %u\n",
				q->cnt_wait_prepare, q->cnt_wait_finish);
		}
		q->cnt_queue_setup = 0;
		q->cnt_wait_prepare = 0;
		q->cnt_wait_finish = 0;
		q->cnt_start_streaming = 0;
		q->cnt_stop_streaming = 0;
	}
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];
		bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
				  vb->cnt_mem_prepare != vb->cnt_mem_finish ||
				  vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
				  vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
				  vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
				  vb->cnt_buf_queue != vb->cnt_buf_done ||
				  vb->cnt_buf_prepare != vb->cnt_buf_finish ||
				  vb->cnt_buf_init != vb->cnt_buf_cleanup;

		if (unbalanced || debug) {
			pr_info("vb2: counters for queue %p, buffer %d:%s\n",
				q, buffer, unbalanced ? " UNBALANCED!" : "");
			pr_info("vb2: buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
				vb->cnt_buf_init, vb->cnt_buf_cleanup,
				vb->cnt_buf_prepare, vb->cnt_buf_finish);
			pr_info("vb2: buf_queue: %u buf_done: %u\n",
				vb->cnt_buf_queue, vb->cnt_buf_done);
			pr_info("vb2: alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
				vb->cnt_mem_alloc, vb->cnt_mem_put,
				vb->cnt_mem_prepare, vb->cnt_mem_finish,
				vb->cnt_mem_mmap);
			pr_info("vb2: get_userptr: %u put_userptr: %u\n",
				vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
			pr_info("vb2: attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
				vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
				vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
			pr_info("vb2: get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
				vb->cnt_mem_get_dmabuf,
				vb->cnt_mem_num_users,
				vb->cnt_mem_vaddr,
				vb->cnt_mem_cookie);
		}
	}
#endif

	/* Free videobuf buffers */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		kfree(q->bufs[buffer]);
		q->bufs[buffer] = NULL;
	}

	q->num_buffers -= buffers;
	if (!q->num_buffers) {
		q->memory = 0;
		INIT_LIST_HEAD(&q->queued_list);
	}
	return 0;
}

/**
 * __verify_planes_array() - verify that the planes array passed in struct
 * v4l2_buffer from userspace can be safely used
 */
static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
		return 0;

	/* Is memory for copying plane information present? */
	if (NULL == b->m.planes) {
		dprintk(1, "Multi-planar buffer passed but "
			   "planes array not provided\n");
		return -EINVAL;
	}

	if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) {
		dprintk(1, "Incorrect planes array length, "
				"expected %d, got %d\n", vb->num_planes, b->length);
		return -EINVAL;
	}

	return 0;
}

/**
 * __verify_length() - Verify that the bytesused value for each plane fits in
 * the plane length and that the data offset doesn't exceed the bytesused value.
 */
static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	unsigned int length;
	unsigned int plane;

	if (!V4L2_TYPE_IS_OUTPUT(b->type))
		return 0;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		for (plane = 0; plane < vb->num_planes; ++plane) {
			length = (b->memory == V4L2_MEMORY_USERPTR)
			       ? b->m.planes[plane].length
			       : vb->v4l2_planes[plane].length;

			if (b->m.planes[plane].bytesused > length)
				return -EINVAL;

			if (b->m.planes[plane].data_offset > 0 &&
			    b->m.planes[plane].data_offset >=
			    b->m.planes[plane].bytesused)
				return -EINVAL;
		}
	} else {
		length = (b->memory == V4L2_MEMORY_USERPTR)
		       ? b->length : vb->v4l2_planes[0].length;

		if (b->bytesused > length)
			return -EINVAL;
	}

	return 0;
}

/**
 * __buffer_in_use() - return true if the buffer is in use and
 * the queue cannot be freed (by the means of REQBUFS(0)) call
 */
static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
{
	unsigned int plane;
	for (plane = 0; plane < vb->num_planes; ++plane) {
		void *mem_priv = vb->planes[plane].mem_priv;
		/*
		 * If num_users() has not been provided, call_memop
		 * will return 0, apparently nobody cares about this
		 * case anyway. If num_users() returns more than 1,
		 * we are not the only user of the plane's memory.
		 */
		if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
			return true;
	}
	return false;
}

/**
 * __buffers_in_use() - return true if any buffers on the queue are in use and
 * the queue cannot be freed (by the means of REQBUFS(0)) call
 */
static bool __buffers_in_use(struct vb2_queue *q)
{
	unsigned int buffer;
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		if (__buffer_in_use(q, q->bufs[buffer]))
			return true;
	}
	return false;
}

/**
 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
 * returned to userspace
 */
static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;

	/* Copy back data such as timestamp, flags, etc. */
	memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
	b->reserved2 = vb->v4l2_buf.reserved2;
	b->reserved = vb->v4l2_buf.reserved;

	if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
		/*
		 * Fill in plane-related data if userspace provided an array
		 * for it. The caller has already verified memory and size.
		 */
		b->length = vb->num_planes;
		memcpy(b->m.planes, vb->v4l2_planes,
			b->length * sizeof(struct v4l2_plane));
	} else {
		/*
		 * We use length and offset in v4l2_planes array even for
		 * single-planar buffers, but userspace does not.
		 */
		b->length = vb->v4l2_planes[0].length;
		b->bytesused = vb->v4l2_planes[0].bytesused;
		if (q->memory == V4L2_MEMORY_MMAP)
			b->m.offset = vb->v4l2_planes[0].m.mem_offset;
		else if (q->memory == V4L2_MEMORY_USERPTR)
			b->m.userptr = vb->v4l2_planes[0].m.userptr;
		else if (q->memory == V4L2_MEMORY_DMABUF)
			b->m.fd = vb->v4l2_planes[0].m.fd;
	}

	/*
	 * Clear any buffer state related flags.
	 */
	b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
	b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
	if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
	    V4L2_BUF_FLAG_TIMESTAMP_COPY) {
		/*
		 * For non-COPY timestamps, drop timestamp source bits
		 * and obtain the timestamp source from the queue.
		 */
		b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	switch (vb->state) {
	case VB2_BUF_STATE_QUEUED:
	case VB2_BUF_STATE_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VB2_BUF_STATE_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		/* fall through */
	case VB2_BUF_STATE_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VB2_BUF_STATE_PREPARED:
		b->flags |= V4L2_BUF_FLAG_PREPARED;
		break;
	case VB2_BUF_STATE_PREPARING:
	case VB2_BUF_STATE_DEQUEUED:
		/* nothing */
		break;
	}

	if (__buffer_in_use(q, vb))
		b->flags |= V4L2_BUF_FLAG_MAPPED;
}

/**
 * vb2_querybuf() - query video buffer information
 * @q:		videobuf queue
 * @b:		buffer struct passed from userspace to vidioc_querybuf handler
 *		in driver
 *
 * Should be called from vidioc_querybuf ioctl handler in driver.
 * This function will verify the passed v4l2_buffer structure and fill the
 * relevant information for the userspace.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_querybuf handler in driver.
 */
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(1, "wrong buffer type\n");
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(1, "buffer index out of range\n");
		return -EINVAL;
	}
	vb = q->bufs[b->index];
	ret = __verify_planes_array(vb, b);
	if (!ret)
		__fill_v4l2_buffer(vb, b);
	return ret;
}
EXPORT_SYMBOL(vb2_querybuf);
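
/*
 * Usage sketch (illustrative only; 'struct my_dev' and its 'queue' member are
 * assumptions, not part of this file): a driver normally wires its
 * vidioc_querybuf handler straight to vb2_querybuf() and returns the result
 * unchanged.
 *
 *	static int my_vidioc_querybuf(struct file *file, void *priv,
 *				      struct v4l2_buffer *b)
 *	{
 *		struct my_dev *dev = video_drvdata(file);
 *
 *		return vb2_querybuf(&dev->queue, b);
 *	}
 */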

/**
 * __verify_userptr_ops() - verify that all memory operations required for
 * USERPTR queue type have been provided
 */
static int __verify_userptr_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
	    !q->mem_ops->put_userptr)
		return -EINVAL;

	return 0;
}

/**
 * __verify_mmap_ops() - verify that all memory operations required for
 * MMAP queue type have been provided
 */
static int __verify_mmap_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
	    !q->mem_ops->put || !q->mem_ops->mmap)
		return -EINVAL;

	return 0;
}

/**
 * __verify_dmabuf_ops() - verify that all memory operations required for
 * DMABUF queue type have been provided
 */
static int __verify_dmabuf_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
	    !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
	    !q->mem_ops->unmap_dmabuf)
		return -EINVAL;

	return 0;
}

/**
 * __verify_memory_type() - Check whether the memory type and buffer type
 * passed to a buffer operation are compatible with the queue.
 */
static int __verify_memory_type(struct vb2_queue *q,
		enum v4l2_memory memory, enum v4l2_buf_type type)
{
	if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
	    memory != V4L2_MEMORY_DMABUF) {
		dprintk(1, "unsupported memory type\n");
		return -EINVAL;
	}

	if (type != q->type) {
		dprintk(1, "requested type is incorrect\n");
		return -EINVAL;
	}

	/*
	 * Make sure all the required memory ops for given memory type
	 * are available.
	 */
	if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
		dprintk(1, "MMAP for current setup unsupported\n");
		return -EINVAL;
	}

	if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
		dprintk(1, "USERPTR for current setup unsupported\n");
		return -EINVAL;
	}

	if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
		dprintk(1, "DMABUF for current setup unsupported\n");
		return -EINVAL;
	}

	/*
	 * Place the busy tests at the end: -EBUSY can be ignored when
	 * create_bufs is called with count == 0, but count == 0 should still
	 * do the memory and type validation.
	 */
	if (q->fileio) {
		dprintk(1, "file io in progress\n");
		return -EBUSY;
	}
	return 0;
}

/**
 * __reqbufs() - Initiate streaming
 * @q:		videobuf2 queue
 * @req:	struct passed from userspace to vidioc_reqbufs handler in driver
 *
 * Should be called from vidioc_reqbufs ioctl handler of a driver.
 * This function:
 * 1) verifies streaming parameters passed from the userspace,
 * 2) sets up the queue,
 * 3) negotiates number of buffers and planes per buffer with the driver
 *    to be used during streaming,
 * 4) allocates internal buffer structures (struct vb2_buffer), according to
 *    the agreed parameters,
 * 5) for MMAP memory type, allocates actual video memory, using the
 *    memory handling/allocation routines provided during queue initialization
 *
 * If req->count is 0, all the memory will be freed instead.
856 * and the queue is not busy, memory will be reallocated.
857 *
858 * The return values from this function are intended to be directly returned
859 * from vidioc_reqbufs handler in driver.
860 */
Hans Verkuil37d9ed92012-06-27 17:10:30 -0300861static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
Pawel Osciake23ccc02010-10-11 10:56:41 -0300862{
Guennadi Liakhovetski2d864012011-09-28 09:23:02 -0300863 unsigned int num_buffers, allocated_buffers, num_planes = 0;
Hans Verkuil37d9ed92012-06-27 17:10:30 -0300864 int ret;
Pawel Osciake23ccc02010-10-11 10:56:41 -0300865
866 if (q->streaming) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -0300867 dprintk(1, "streaming active\n");
Pawel Osciake23ccc02010-10-11 10:56:41 -0300868 return -EBUSY;
869 }
870
Marek Szyprowski29e3fbd2011-03-09 14:03:24 -0300871 if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
Pawel Osciake23ccc02010-10-11 10:56:41 -0300872 /*
873 * We already have buffers allocated, so first check if they
874 * are not in use and can be freed.
875 */
876 if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -0300877 dprintk(1, "memory in use, cannot free\n");
Pawel Osciake23ccc02010-10-11 10:56:41 -0300878 return -EBUSY;
879 }
880
Hans Verkuilfb64dca2014-02-28 12:49:18 -0300881 /*
882 * Call queue_cancel to clean up any buffers in the PREPARED or
883 * QUEUED state which is possible if buffers were prepared or
884 * queued without ever calling STREAMON.
885 */
886 __vb2_queue_cancel(q);
Hans Verkuil63faabf2013-12-13 13:13:40 -0300887 ret = __vb2_queue_free(q, q->num_buffers);
888 if (ret)
889 return ret;
Marek Szyprowski29e3fbd2011-03-09 14:03:24 -0300890
891 /*
892 * In case of REQBUFS(0) return immediately without calling
893 * driver's queue_setup() callback and allocating resources.
894 */
895 if (req->count == 0)
896 return 0;
Pawel Osciake23ccc02010-10-11 10:56:41 -0300897 }
898
899 /*
900 * Make sure the requested values and current defaults are sane.
901 */
902 num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
Hans Verkuilb3379c62014-02-24 13:51:03 -0300903 num_buffers = max_t(unsigned int, req->count, q->min_buffers_needed);
Marek Szyprowskic1426bc2011-08-24 06:36:26 -0300904 memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
Pawel Osciake23ccc02010-10-11 10:56:41 -0300905 memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
Marek Szyprowski13b14092011-04-14 07:17:44 -0300906 q->memory = req->memory;
Pawel Osciake23ccc02010-10-11 10:56:41 -0300907
908 /*
909 * Ask the driver how many buffers and planes per buffer it requires.
910 * Driver also sets the size and allocator context for each plane.
911 */
Guennadi Liakhovetskifc714e72011-08-24 10:30:21 -0300912 ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
Marek Szyprowskic1426bc2011-08-24 06:36:26 -0300913 q->plane_sizes, q->alloc_ctx);
Hans Verkuila1d36d82014-03-17 09:54:21 -0300914 if (ret)
Pawel Osciake23ccc02010-10-11 10:56:41 -0300915 return ret;
916
917 /* Finally, allocate buffers and video memory */
Hans Verkuila7afcac2014-02-24 13:41:20 -0300918 allocated_buffers = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
919 if (allocated_buffers == 0) {
Marek Szyprowski66072d42011-06-28 08:29:02 -0300920 dprintk(1, "Memory allocation failed\n");
921 return -ENOMEM;
Pawel Osciake23ccc02010-10-11 10:56:41 -0300922 }
923
924 /*
Hans Verkuilb3379c62014-02-24 13:51:03 -0300925 * There is no point in continuing if we can't allocate the minimum
926 * number of buffers needed by this vb2_queue.
927 */
928 if (allocated_buffers < q->min_buffers_needed)
929 ret = -ENOMEM;
930
931 /*
Pawel Osciake23ccc02010-10-11 10:56:41 -0300932 * Check if driver can handle the allocated number of buffers.
933 */
Hans Verkuilb3379c62014-02-24 13:51:03 -0300934 if (!ret && allocated_buffers < num_buffers) {
Guennadi Liakhovetski2d864012011-09-28 09:23:02 -0300935 num_buffers = allocated_buffers;
Pawel Osciake23ccc02010-10-11 10:56:41 -0300936
Guennadi Liakhovetskifc714e72011-08-24 10:30:21 -0300937 ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
938 &num_planes, q->plane_sizes, q->alloc_ctx);
Pawel Osciake23ccc02010-10-11 10:56:41 -0300939
Guennadi Liakhovetski2d864012011-09-28 09:23:02 -0300940 if (!ret && allocated_buffers < num_buffers)
Pawel Osciake23ccc02010-10-11 10:56:41 -0300941 ret = -ENOMEM;
Pawel Osciake23ccc02010-10-11 10:56:41 -0300942
943 /*
Guennadi Liakhovetski2d864012011-09-28 09:23:02 -0300944 * Either the driver has accepted a smaller number of buffers,
945 * or .queue_setup() returned an error
Pawel Osciake23ccc02010-10-11 10:56:41 -0300946 */
Guennadi Liakhovetski2d864012011-09-28 09:23:02 -0300947 }
948
949 q->num_buffers = allocated_buffers;
950
951 if (ret < 0) {
Hans Verkuila7afcac2014-02-24 13:41:20 -0300952 /*
953 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
954 * from q->num_buffers.
955 */
Guennadi Liakhovetski2d864012011-09-28 09:23:02 -0300956 __vb2_queue_free(q, allocated_buffers);
957 return ret;
Pawel Osciake23ccc02010-10-11 10:56:41 -0300958 }
959
Pawel Osciake23ccc02010-10-11 10:56:41 -0300960 /*
961 * Return the number of successfully allocated buffers
962 * to the userspace.
963 */
Guennadi Liakhovetski2d864012011-09-28 09:23:02 -0300964 req->count = allocated_buffers;
Pawel Osciake23ccc02010-10-11 10:56:41 -0300965
966 return 0;
Pawel Osciake23ccc02010-10-11 10:56:41 -0300967}
Hans Verkuil37d9ed92012-06-27 17:10:30 -0300968
969/**
970 * vb2_reqbufs() - Wrapper for __reqbufs() that also verifies the memory and
971 * type values.
972 * @q: videobuf2 queue
973 * @req: struct passed from userspace to vidioc_reqbufs handler in driver
974 */
975int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
976{
977 int ret = __verify_memory_type(q, req->memory, req->type);
978
979 return ret ? ret : __reqbufs(q, req);
980}
Pawel Osciake23ccc02010-10-11 10:56:41 -0300981EXPORT_SYMBOL_GPL(vb2_reqbufs);
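
/*
 * Usage sketch (illustrative only; 'struct my_dev' and its 'queue' member are
 * assumptions, not part of this file): the typical vidioc_reqbufs handler
 * simply forwards the request to vb2_reqbufs() and returns its result.
 *
 *	static int my_vidioc_reqbufs(struct file *file, void *priv,
 *				     struct v4l2_requestbuffers *req)
 *	{
 *		struct my_dev *dev = video_drvdata(file);
 *
 *		return vb2_reqbufs(&dev->queue, req);
 *	}
 */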

/**
 * __create_bufs() - Allocate buffers and any required auxiliary structs
 * @q:		videobuf2 queue
 * @create:	creation parameters, passed from userspace to vidioc_create_bufs
 *		handler in driver
 *
 * Should be called from vidioc_create_bufs ioctl handler of a driver.
 * This function:
 * 1) verifies parameter sanity
 * 2) calls the .queue_setup() queue operation
 * 3) performs any necessary memory allocations
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_create_bufs handler in driver.
 */
static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
	unsigned int num_planes = 0, num_buffers, allocated_buffers;
	int ret;

	if (q->num_buffers == VIDEO_MAX_FRAME) {
		dprintk(1, "maximum number of buffers already allocated\n");
		return -ENOBUFS;
	}

	if (!q->num_buffers) {
		memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
		memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
		q->memory = create->memory;
	}

	num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);

	/*
	 * Ask the driver whether the requested number of buffers, planes per
	 * buffer and their sizes are acceptable
	 */
	ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
		       &num_planes, q->plane_sizes, q->alloc_ctx);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	allocated_buffers = __vb2_queue_alloc(q, create->memory, num_buffers,
				num_planes);
	if (allocated_buffers == 0) {
		dprintk(1, "Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * Check if driver can handle the so far allocated number of buffers.
	 */
	if (allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		/*
		 * q->num_buffers contains the total number of buffers that the
		 * queue driver has set up
		 */
		ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
			       &num_planes, q->plane_sizes, q->alloc_ctx);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	q->num_buffers += allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 */
		__vb2_queue_free(q, allocated_buffers);
		return -ENOMEM;
	}

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	create->count = allocated_buffers;

	return 0;
}

/**
 * vb2_create_bufs() - Wrapper for __create_bufs() that also verifies the
 * memory and type values.
 * @q:		videobuf2 queue
 * @create:	creation parameters, passed from userspace to vidioc_create_bufs
 *		handler in driver
 */
int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
	int ret = __verify_memory_type(q, create->memory, create->format.type);

	create->index = q->num_buffers;
	if (create->count == 0)
		return ret != -EBUSY ? ret : 0;
	return ret ? ret : __create_bufs(q, create);
}
EXPORT_SYMBOL_GPL(vb2_create_bufs);

/**
 * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
 * @vb:		vb2_buffer to which the plane in question belongs
 * @plane_no:	plane number for which the address is to be returned
 *
 * This function returns a kernel virtual address of a given plane if
 * such a mapping exists, NULL otherwise.
 */
void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
{
	if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
		return NULL;

	return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);

}
EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
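
/*
 * Usage sketch (illustrative only): a driver that touches buffer contents
 * with the CPU can map plane 0 in its buf_queue op. 'process_frame()' is a
 * placeholder for driver-specific work, not a function defined here;
 * vb2_plane_size() is the helper from videobuf2-core.h.
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		void *vaddr = vb2_plane_vaddr(vb, 0);
 *
 *		if (vaddr)
 *			process_frame(vaddr, vb2_plane_size(vb, 0));
 *	}
 */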

/**
 * vb2_plane_cookie() - Return allocator specific cookie for the given plane
 * @vb:		vb2_buffer to which the plane in question belongs
 * @plane_no:	plane number for which the cookie is to be returned
 *
 * This function returns an allocator specific cookie for a given plane if
 * available, NULL otherwise. The allocator should provide some simple static
 * inline function, which would convert this cookie to the allocator specific
 * type that can be used directly by the driver to access the buffer. This can
 * be for example physical address, pointer to scatter list or IOMMU mapping.
 */
void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
{
	if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
		return NULL;

	return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_cookie);

/**
 * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
 * @vb:		vb2_buffer returned from the driver
 * @state:	either VB2_BUF_STATE_DONE if the operation finished successfully
 *		or VB2_BUF_STATE_ERROR if the operation finished with an error.
 *		If start_streaming fails then it should return buffers with state
 *		VB2_BUF_STATE_QUEUED to put them back into the queue.
 *
 * This function should be called by the driver after a hardware operation on
 * a buffer is finished and the buffer may be returned to userspace. The driver
 * cannot use this buffer anymore until it is queued back to it by videobuf
 * by the means of buf_queue callback. Only buffers previously queued to the
 * driver by buf_queue can be passed to this function.
 *
 * While streaming a buffer can only be returned in state DONE or ERROR.
 * The start_streaming op can also return them in case the DMA engine cannot
 * be started for some reason. In that case the buffers should be returned with
 * state QUEUED.
 */
void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned long flags;
	unsigned int plane;

	if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
		return;

	if (!q->start_streaming_called) {
		if (WARN_ON(state != VB2_BUF_STATE_QUEUED))
			state = VB2_BUF_STATE_QUEUED;
	} else if (!WARN_ON(!q->start_streaming_called)) {
		if (WARN_ON(state != VB2_BUF_STATE_DONE &&
			    state != VB2_BUF_STATE_ERROR))
			state = VB2_BUF_STATE_ERROR;
	}

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Although this is not a callback, it still does have to balance
	 * with the buf_queue op. So update this counter manually.
	 */
	vb->cnt_buf_done++;
#endif
	dprintk(4, "Done processing on buffer %d, state: %d\n",
			vb->v4l2_buf.index, state);

	/* sync buffers */
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_void_memop(vb, finish, vb->planes[plane].mem_priv);

	/* Add the buffer to the done buffers list */
	spin_lock_irqsave(&q->done_lock, flags);
	vb->state = state;
	if (state != VB2_BUF_STATE_QUEUED)
		list_add_tail(&vb->done_entry, &q->done_list);
	atomic_dec(&q->owned_by_drv_count);
	spin_unlock_irqrestore(&q->done_lock, flags);

	if (state == VB2_BUF_STATE_QUEUED)
		return;

	/* Inform any processes that may be waiting for buffers */
	wake_up(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
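
/*
 * Usage sketch (illustrative only): a capture driver typically calls
 * vb2_buffer_done() from its interrupt handler once the DMA for a buffer has
 * completed. 'struct my_dev' and the 'cur_buf' bookkeeping are assumptions
 * about the surrounding driver, not part of this file.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *		struct vb2_buffer *vb = dev->cur_buf;
 *
 *		v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
 *		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
 *		return IRQ_HANDLED;
 *	}
 */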

/**
 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
 * v4l2_buffer by the userspace. The caller has already verified that struct
 * v4l2_buffer has a valid number of planes.
 */
static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
				struct v4l2_plane *v4l2_planes)
{
	unsigned int plane;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		/* Fill in driver-provided information for OUTPUT types */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			bool bytesused_is_used;

			/* Check if bytesused == 0 for all planes */
			for (plane = 0; plane < vb->num_planes; ++plane)
				if (b->m.planes[plane].bytesused)
					break;
			bytesused_is_used = plane < vb->num_planes;

			/*
			 * Will have to go up to b->length when API starts
			 * accepting variable number of planes.
			 *
			 * If bytesused_is_used is false, then fall back to the
			 * full buffer size. In that case userspace clearly
			 * never bothered to set it and it's a safe assumption
			 * that they really meant to use the full plane sizes.
			 */
			for (plane = 0; plane < vb->num_planes; ++plane) {
				struct v4l2_plane *pdst = &v4l2_planes[plane];
				struct v4l2_plane *psrc = &b->m.planes[plane];

				pdst->bytesused = bytesused_is_used ?
					psrc->bytesused : psrc->length;
				pdst->data_offset = psrc->data_offset;
			}
		}

		if (b->memory == V4L2_MEMORY_USERPTR) {
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].m.userptr =
					b->m.planes[plane].m.userptr;
				v4l2_planes[plane].length =
					b->m.planes[plane].length;
			}
		}
		if (b->memory == V4L2_MEMORY_DMABUF) {
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].m.fd =
					b->m.planes[plane].m.fd;
				v4l2_planes[plane].length =
					b->m.planes[plane].length;
			}
		}
	} else {
		/*
		 * Single-planar buffers do not use planes array,
		 * so fill in relevant v4l2_buffer struct fields instead.
		 * In videobuf we use our internal V4l2_planes struct for
		 * single-planar buffers as well, for simplicity.
		 *
		 * If bytesused == 0, then fall back to the full buffer size
		 * as that's a sensible default.
		 */
		if (V4L2_TYPE_IS_OUTPUT(b->type))
			v4l2_planes[0].bytesused =
				b->bytesused ? b->bytesused : b->length;
		else
			v4l2_planes[0].bytesused = 0;

		if (b->memory == V4L2_MEMORY_USERPTR) {
			v4l2_planes[0].m.userptr = b->m.userptr;
			v4l2_planes[0].length = b->length;
		}

		if (b->memory == V4L2_MEMORY_DMABUF) {
			v4l2_planes[0].m.fd = b->m.fd;
			v4l2_planes[0].length = b->length;
		}
	}

	/* Zero flags that the vb2 core handles */
	vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
	if ((vb->vb2_queue->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
	    V4L2_BUF_FLAG_TIMESTAMP_COPY || !V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * Non-COPY timestamps and non-OUTPUT queues will get
		 * their timestamp and timestamp source flags from the
		 * queue.
		 */
		vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	if (V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * For output buffers mask out the timecode flag:
		 * this will be handled later in vb2_internal_qbuf().
		 * The 'field' is valid metadata for this output buffer
		 * and so that needs to be copied here.
		 */
		vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TIMECODE;
		vb->v4l2_buf.field = b->field;
	} else {
		/* Zero any output buffer flags as this is a capture buffer */
		vb->v4l2_buf.flags &= ~V4L2_BUFFER_OUT_FLAGS;
	}
}

/**
 * __qbuf_mmap() - handle qbuf of an MMAP buffer
 */
static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	__fill_vb2_buffer(vb, b, vb->v4l2_planes);
	return call_vb_qop(vb, buf_prepare, vb);
}

/**
 * __qbuf_userptr() - handle qbuf of a USERPTR buffer
 */
static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	struct v4l2_plane planes[VIDEO_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret;
	int write = !V4L2_TYPE_IS_OUTPUT(q->type);
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	__fill_vb2_buffer(vb, b, planes);

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if already verified */
		if (vb->v4l2_planes[plane].m.userptr &&
		    vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
		    && vb->v4l2_planes[plane].length == planes[plane].length)
			continue;

		dprintk(3, "userspace address for plane %d changed, "
				"reacquiring memory\n", plane);

		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < q->plane_sizes[plane]) {
			dprintk(1, "provided buffer size %u is less than "
						"setup size %u for plane %d\n",
						planes[plane].length,
						q->plane_sizes[plane], plane);
			ret = -EINVAL;
			goto err;
		}

		/* Release previously acquired memory if present */
		if (vb->planes[plane].mem_priv) {
			if (!reacquired) {
				reacquired = true;
				call_void_vb_qop(vb, buf_cleanup, vb);
			}
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		}

		vb->planes[plane].mem_priv = NULL;
		memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],
				      planes[plane].m.userptr,
				      planes[plane].length, write);
		if (IS_ERR_OR_NULL(mem_priv)) {
			dprintk(1, "failed acquiring userspace "
						"memory for plane %d\n", plane);
			ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
			goto err;
		}
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane)
		vb->v4l2_planes[plane] = planes[plane];

	if (reacquired) {
		/*
		 * One or more planes changed, so we must call buf_init to do
		 * the driver-specific initialization on the newly acquired
		 * buffer, if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(1, "buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
Hans Verkuil256f3162014-01-29 13:36:53 -03001403 goto err;
1404 }
1405
Pawel Osciake23ccc02010-10-11 10:56:41 -03001406 return 0;
1407err:
1408 /* In case of errors, release planes that were already acquired */
Marek Szyprowskic1426bc2011-08-24 06:36:26 -03001409 for (plane = 0; plane < vb->num_planes; ++plane) {
1410 if (vb->planes[plane].mem_priv)
Hans Verkuila1d36d82014-03-17 09:54:21 -03001411 call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
Marek Szyprowskic1426bc2011-08-24 06:36:26 -03001412 vb->planes[plane].mem_priv = NULL;
1413 vb->v4l2_planes[plane].m.userptr = 0;
1414 vb->v4l2_planes[plane].length = 0;
Pawel Osciake23ccc02010-10-11 10:56:41 -03001415 }
1416
1417 return ret;
1418}
1419
1420/**
Sumit Semwalc5384042012-06-14 10:37:37 -03001421 * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
1422 */
1423static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1424{
1425 struct v4l2_plane planes[VIDEO_MAX_PLANES];
1426 struct vb2_queue *q = vb->vb2_queue;
1427 void *mem_priv;
1428 unsigned int plane;
1429 int ret;
1430 int write = !V4L2_TYPE_IS_OUTPUT(q->type);
Hans Verkuil256f3162014-01-29 13:36:53 -03001431 bool reacquired = vb->planes[0].mem_priv == NULL;
Sumit Semwalc5384042012-06-14 10:37:37 -03001432
Hans Verkuil412376a2014-04-07 08:44:56 -03001433 memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
Laurent Pinchart6f546c52014-01-01 09:10:48 -03001434 /* Copy relevant information provided by the userspace */
Sumit Semwalc5384042012-06-14 10:37:37 -03001435 __fill_vb2_buffer(vb, b, planes);
1436
1437 for (plane = 0; plane < vb->num_planes; ++plane) {
1438 struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
1439
1440 if (IS_ERR_OR_NULL(dbuf)) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001441 dprintk(1, "invalid dmabuf fd for plane %d\n",
Sumit Semwalc5384042012-06-14 10:37:37 -03001442 plane);
1443 ret = -EINVAL;
1444 goto err;
1445 }
1446
1447 /* use DMABUF size if length is not provided */
1448 if (planes[plane].length == 0)
1449 planes[plane].length = dbuf->size;
1450
Hans Verkuil412376a2014-04-07 08:44:56 -03001451 if (planes[plane].length < q->plane_sizes[plane]) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001452 dprintk(1, "invalid dmabuf length for plane %d\n",
Seung-Woo Kim77c07822013-11-29 04:50:29 -03001453 plane);
Sumit Semwalc5384042012-06-14 10:37:37 -03001454 ret = -EINVAL;
1455 goto err;
1456 }
1457
1458 /* Skip the plane if already verified */
1459 if (dbuf == vb->planes[plane].dbuf &&
1460 vb->v4l2_planes[plane].length == planes[plane].length) {
1461 dma_buf_put(dbuf);
1462 continue;
1463 }
1464
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001465 dprintk(1, "buffer for plane %d changed\n", plane);
Sumit Semwalc5384042012-06-14 10:37:37 -03001466
Hans Verkuil256f3162014-01-29 13:36:53 -03001467 if (!reacquired) {
1468 reacquired = true;
Hans Verkuila1d36d82014-03-17 09:54:21 -03001469 call_void_vb_qop(vb, buf_cleanup, vb);
Hans Verkuil256f3162014-01-29 13:36:53 -03001470 }
1471
Sumit Semwalc5384042012-06-14 10:37:37 -03001472 /* Release previously acquired memory if present */
Hans Verkuilb5b45412014-01-29 11:53:25 -03001473 __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
Sumit Semwalc5384042012-06-14 10:37:37 -03001474 memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
1475
1476 /* Acquire each plane's memory */
Hans Verkuila1d36d82014-03-17 09:54:21 -03001477 mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane],
Sumit Semwalc5384042012-06-14 10:37:37 -03001478 dbuf, planes[plane].length, write);
1479 if (IS_ERR(mem_priv)) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001480 dprintk(1, "failed to attach dmabuf\n");
Sumit Semwalc5384042012-06-14 10:37:37 -03001481 ret = PTR_ERR(mem_priv);
1482 dma_buf_put(dbuf);
1483 goto err;
1484 }
1485
1486 vb->planes[plane].dbuf = dbuf;
1487 vb->planes[plane].mem_priv = mem_priv;
1488 }
1489
	 1490	/* TODO: This pins the buffer(s) with dma_buf_map_attachment(), but
	 1491	 * really we want to do this just before the DMA, not while queueing
	 1492	 * the buffer(s).
1493 */
1494 for (plane = 0; plane < vb->num_planes; ++plane) {
Hans Verkuilb5b45412014-01-29 11:53:25 -03001495 ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
Sumit Semwalc5384042012-06-14 10:37:37 -03001496 if (ret) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001497 dprintk(1, "failed to map dmabuf for plane %d\n",
Sumit Semwalc5384042012-06-14 10:37:37 -03001498 plane);
1499 goto err;
1500 }
1501 vb->planes[plane].dbuf_mapped = 1;
1502 }
1503
1504 /*
Sumit Semwalc5384042012-06-14 10:37:37 -03001505 * Now that everything is in order, copy relevant information
1506 * provided by userspace.
1507 */
1508 for (plane = 0; plane < vb->num_planes; ++plane)
1509 vb->v4l2_planes[plane] = planes[plane];
1510
Hans Verkuil256f3162014-01-29 13:36:53 -03001511 if (reacquired) {
1512 /*
1513 * Call driver-specific initialization on the newly acquired buffer,
1514 * if provided.
1515 */
1516 ret = call_vb_qop(vb, buf_init, vb);
1517 if (ret) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001518 dprintk(1, "buffer initialization failed\n");
Hans Verkuil256f3162014-01-29 13:36:53 -03001519 goto err;
1520 }
1521 }
1522
1523 ret = call_vb_qop(vb, buf_prepare, vb);
1524 if (ret) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001525 dprintk(1, "buffer preparation failed\n");
Hans Verkuila1d36d82014-03-17 09:54:21 -03001526 call_void_vb_qop(vb, buf_cleanup, vb);
Hans Verkuil256f3162014-01-29 13:36:53 -03001527 goto err;
1528 }
1529
Sumit Semwalc5384042012-06-14 10:37:37 -03001530 return 0;
1531err:
1532 /* In case of errors, release planes that were already acquired */
1533 __vb2_buf_dmabuf_put(vb);
1534
1535 return ret;
1536}
1537
1538/**
Pawel Osciake23ccc02010-10-11 10:56:41 -03001539 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
1540 */
1541static void __enqueue_in_driver(struct vb2_buffer *vb)
1542{
1543 struct vb2_queue *q = vb->vb2_queue;
Marek Szyprowski3e0c2f22012-06-14 10:37:43 -03001544 unsigned int plane;
Pawel Osciake23ccc02010-10-11 10:56:41 -03001545
1546 vb->state = VB2_BUF_STATE_ACTIVE;
Hans Verkuil6ea3b982014-02-06 05:46:11 -03001547 atomic_inc(&q->owned_by_drv_count);
Marek Szyprowski3e0c2f22012-06-14 10:37:43 -03001548
1549 /* sync buffers */
1550 for (plane = 0; plane < vb->num_planes; ++plane)
Hans Verkuila1d36d82014-03-17 09:54:21 -03001551 call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
Marek Szyprowski3e0c2f22012-06-14 10:37:43 -03001552
Hans Verkuila1d36d82014-03-17 09:54:21 -03001553 call_void_vb_qop(vb, buf_queue, vb);
Pawel Osciake23ccc02010-10-11 10:56:41 -03001554}
1555
Guennadi Liakhovetski2d864012011-09-28 09:23:02 -03001556static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
Guennadi Liakhovetskiebc087d2011-08-31 06:51:10 -03001557{
1558 struct vb2_queue *q = vb->vb2_queue;
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001559 struct rw_semaphore *mmap_sem;
Guennadi Liakhovetskiebc087d2011-08-31 06:51:10 -03001560 int ret;
1561
Laurent Pinchart8023ed02012-07-10 10:41:40 -03001562 ret = __verify_length(vb, b);
Sylwester Nawrocki3a9621b2013-08-26 11:47:53 -03001563 if (ret < 0) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001564 dprintk(1, "plane parameters verification failed: %d\n", ret);
Laurent Pinchart8023ed02012-07-10 10:41:40 -03001565 return ret;
Sylwester Nawrocki3a9621b2013-08-26 11:47:53 -03001566 }
Laurent Pinchart8023ed02012-07-10 10:41:40 -03001567
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001568 vb->state = VB2_BUF_STATE_PREPARING;
Hans Verkuilf1343282014-02-24 14:44:50 -03001569 vb->v4l2_buf.timestamp.tv_sec = 0;
1570 vb->v4l2_buf.timestamp.tv_usec = 0;
1571 vb->v4l2_buf.sequence = 0;
1572
Guennadi Liakhovetskiebc087d2011-08-31 06:51:10 -03001573 switch (q->memory) {
1574 case V4L2_MEMORY_MMAP:
1575 ret = __qbuf_mmap(vb, b);
1576 break;
1577 case V4L2_MEMORY_USERPTR:
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001578 /*
Mauro Carvalho Chehabf103b5d2014-01-07 07:03:09 -02001579 * In case of user pointer buffers vb2 allocators need to get
1580 * direct access to userspace pages. This requires getting
1581 * the mmap semaphore for read access in the current process
1582 * structure. The same semaphore is taken before calling mmap
1583 * operation, while both qbuf/prepare_buf and mmap are called
1584 * by the driver or v4l2 core with the driver's lock held.
1585 * To avoid an AB-BA deadlock (mmap_sem then driver's lock in
1586 * mmap and driver's lock then mmap_sem in qbuf/prepare_buf),
1587 * the videobuf2 core releases the driver's lock, takes
1588 * mmap_sem and then takes the driver's lock again.
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001589 */
1590 mmap_sem = &current->mm->mmap_sem;
Hans Verkuila1d36d82014-03-17 09:54:21 -03001591 call_void_qop(q, wait_prepare, q);
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001592 down_read(mmap_sem);
Hans Verkuila1d36d82014-03-17 09:54:21 -03001593 call_void_qop(q, wait_finish, q);
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001594
Guennadi Liakhovetskiebc087d2011-08-31 06:51:10 -03001595 ret = __qbuf_userptr(vb, b);
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001596
1597 up_read(mmap_sem);
Guennadi Liakhovetskiebc087d2011-08-31 06:51:10 -03001598 break;
Sumit Semwalc5384042012-06-14 10:37:37 -03001599 case V4L2_MEMORY_DMABUF:
1600 ret = __qbuf_dmabuf(vb, b);
1601 break;
Guennadi Liakhovetskiebc087d2011-08-31 06:51:10 -03001602 default:
1603 WARN(1, "Invalid queue type\n");
1604 ret = -EINVAL;
1605 }
1606
Guennadi Liakhovetskiebc087d2011-08-31 06:51:10 -03001607 if (ret)
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001608 dprintk(1, "buffer preparation failed: %d\n", ret);
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001609 vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;
Guennadi Liakhovetskiebc087d2011-08-31 06:51:10 -03001610
1611 return ret;
1612}
1613
Laurent Pinchart012043b2013-08-09 08:11:26 -03001614static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
Hans Verkuil41381112013-12-13 13:13:39 -03001615 const char *opname)
Laurent Pinchart012043b2013-08-09 08:11:26 -03001616{
Laurent Pinchart012043b2013-08-09 08:11:26 -03001617 if (b->type != q->type) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001618 dprintk(1, "%s: invalid buffer type\n", opname);
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001619 return -EINVAL;
Laurent Pinchart012043b2013-08-09 08:11:26 -03001620 }
1621
1622 if (b->index >= q->num_buffers) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001623 dprintk(1, "%s: buffer index out of range\n", opname);
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001624 return -EINVAL;
Laurent Pinchart012043b2013-08-09 08:11:26 -03001625 }
1626
Hans Verkuil41381112013-12-13 13:13:39 -03001627 if (q->bufs[b->index] == NULL) {
Laurent Pinchart012043b2013-08-09 08:11:26 -03001628 /* Should never happen */
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001629 dprintk(1, "%s: buffer is NULL\n", opname);
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001630 return -EINVAL;
Laurent Pinchart012043b2013-08-09 08:11:26 -03001631 }
1632
1633 if (b->memory != q->memory) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001634 dprintk(1, "%s: invalid memory type\n", opname);
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001635 return -EINVAL;
Laurent Pinchart012043b2013-08-09 08:11:26 -03001636 }
1637
Hans Verkuil41381112013-12-13 13:13:39 -03001638 return __verify_planes_array(q->bufs[b->index], b);
Laurent Pinchart012043b2013-08-09 08:11:26 -03001639}
1640
Pawel Osciake23ccc02010-10-11 10:56:41 -03001641/**
Guennadi Liakhovetski2d864012011-09-28 09:23:02 -03001642 * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
1643 * @q: videobuf2 queue
1644 * @b: buffer structure passed from userspace to vidioc_prepare_buf
1645 * handler in driver
1646 *
1647 * Should be called from vidioc_prepare_buf ioctl handler of a driver.
1648 * This function:
1649 * 1) verifies the passed buffer,
1650 * 2) calls buf_prepare callback in the driver (if provided), in which
1651 * driver-specific buffer initialization can be performed,
1652 *
1653 * The return values from this function are intended to be directly returned
1654 * from vidioc_prepare_buf handler in driver.
1655 */
1656int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
1657{
Hans Verkuil41381112013-12-13 13:13:39 -03001658 struct vb2_buffer *vb;
Hans Verkuilb2f2f042013-12-13 13:13:41 -03001659 int ret;
Hans Verkuil41381112013-12-13 13:13:39 -03001660
Hans Verkuilb2f2f042013-12-13 13:13:41 -03001661 if (q->fileio) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001662 dprintk(1, "file io in progress\n");
Hans Verkuilb2f2f042013-12-13 13:13:41 -03001663 return -EBUSY;
1664 }
1665
1666 ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf");
Hans Verkuil41381112013-12-13 13:13:39 -03001667 if (ret)
1668 return ret;
1669
1670 vb = q->bufs[b->index];
1671 if (vb->state != VB2_BUF_STATE_DEQUEUED) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001672 dprintk(1, "invalid buffer state %d\n",
Hans Verkuil41381112013-12-13 13:13:39 -03001673 vb->state);
1674 return -EINVAL;
1675 }
1676
1677 ret = __buf_prepare(vb, b);
1678 if (!ret) {
1679 /* Fill buffer information for the userspace */
1680 __fill_v4l2_buffer(vb, b);
1681
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001682 dprintk(1, "prepare of buffer %d succeeded\n", vb->v4l2_buf.index);
Hans Verkuil41381112013-12-13 13:13:39 -03001683 }
1684 return ret;
Guennadi Liakhovetski2d864012011-09-28 09:23:02 -03001685}
1686EXPORT_SYMBOL_GPL(vb2_prepare_buf);
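
/*
 * Example (sketch): a driver's vidioc_prepare_buf handler normally just
 * delegates to vb2_prepare_buf().  The names my_vidioc_prepare_buf,
 * struct my_dev and its 'queue' member are made up for illustration and
 * are not part of this API.
 *
 *	static int my_vidioc_prepare_buf(struct file *file, void *priv,
 *					 struct v4l2_buffer *b)
 *	{
 *		struct my_dev *dev = video_drvdata(file);
 *
 *		return vb2_prepare_buf(&dev->queue, b);
 *	}
 */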
1687
Hans Verkuil02f142e2013-12-13 13:13:42 -03001688/**
1689 * vb2_start_streaming() - Attempt to start streaming.
1690 * @q: videobuf2 queue
1691 *
Hans Verkuilb3379c62014-02-24 13:51:03 -03001692 * Attempt to start streaming. When this function is called there must be
1693 * at least q->min_buffers_needed buffers queued up (i.e. the minimum
1694 * number of buffers required for the DMA engine to function). If the
1695 * @start_streaming op fails it is supposed to return all the driver-owned
1696 * buffers back to vb2 in state QUEUED. Check if that happened and if
1697 * not warn and reclaim them forcefully.
Hans Verkuil02f142e2013-12-13 13:13:42 -03001698 */
1699static int vb2_start_streaming(struct vb2_queue *q)
1700{
Hans Verkuilb3379c62014-02-24 13:51:03 -03001701 struct vb2_buffer *vb;
Hans Verkuil02f142e2013-12-13 13:13:42 -03001702 int ret;
1703
Hans Verkuil02f142e2013-12-13 13:13:42 -03001704 /*
Hans Verkuilb3379c62014-02-24 13:51:03 -03001705 * If any buffers were queued before streamon,
1706 * we can now pass them to driver for processing.
Hans Verkuil02f142e2013-12-13 13:13:42 -03001707 */
Hans Verkuilb3379c62014-02-24 13:51:03 -03001708 list_for_each_entry(vb, &q->queued_list, queued_entry)
1709 __enqueue_in_driver(vb);
1710
1711 /* Tell the driver to start streaming */
1712 ret = call_qop(q, start_streaming, q,
1713 atomic_read(&q->owned_by_drv_count));
1714 q->start_streaming_called = ret == 0;
1715 if (!ret)
Hans Verkuil02f142e2013-12-13 13:13:42 -03001716 return 0;
Hans Verkuilb3379c62014-02-24 13:51:03 -03001717
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001718 dprintk(1, "driver refused to start streaming\n");
Hans Verkuilb3379c62014-02-24 13:51:03 -03001719 if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
1720 unsigned i;
1721
1722 /*
1723 * Forcefully reclaim buffers if the driver did not
1724 * correctly return them to vb2.
1725 */
1726 for (i = 0; i < q->num_buffers; ++i) {
1727 vb = q->bufs[i];
1728 if (vb->state == VB2_BUF_STATE_ACTIVE)
1729 vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
1730 }
1731 /* Must be zero now */
1732 WARN_ON(atomic_read(&q->owned_by_drv_count));
Hans Verkuil02f142e2013-12-13 13:13:42 -03001733 }
Hans Verkuil02f142e2013-12-13 13:13:42 -03001734 return ret;
1735}
1736
Hans Verkuilb2f2f042013-12-13 13:13:41 -03001737static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
Laurent Pinchart012043b2013-08-09 08:11:26 -03001738{
Hans Verkuil41381112013-12-13 13:13:39 -03001739 int ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
1740 struct vb2_buffer *vb;
1741
1742 if (ret)
1743 return ret;
1744
1745 vb = q->bufs[b->index];
Laurent Pinchart012043b2013-08-09 08:11:26 -03001746
1747 switch (vb->state) {
1748 case VB2_BUF_STATE_DEQUEUED:
1749 ret = __buf_prepare(vb, b);
1750 if (ret)
1751 return ret;
Hans Verkuil41381112013-12-13 13:13:39 -03001752 break;
Laurent Pinchart012043b2013-08-09 08:11:26 -03001753 case VB2_BUF_STATE_PREPARED:
1754 break;
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001755 case VB2_BUF_STATE_PREPARING:
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001756 dprintk(1, "buffer still being prepared\n");
Hans Verkuilb18a8ff2013-12-13 13:13:38 -03001757 return -EINVAL;
Laurent Pinchart012043b2013-08-09 08:11:26 -03001758 default:
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001759 dprintk(1, "invalid buffer state %d\n", vb->state);
Laurent Pinchart012043b2013-08-09 08:11:26 -03001760 return -EINVAL;
1761 }
1762
1763 /*
1764 * Add to the queued buffers list, a buffer will stay on it until
1765 * dequeued in dqbuf.
1766 */
1767 list_add_tail(&vb->queued_entry, &q->queued_list);
Hans Verkuilb3379c62014-02-24 13:51:03 -03001768 q->queued_count++;
Laurent Pinchart012043b2013-08-09 08:11:26 -03001769 vb->state = VB2_BUF_STATE_QUEUED;
Hans Verkuilf1343282014-02-24 14:44:50 -03001770 if (V4L2_TYPE_IS_OUTPUT(q->type)) {
1771 /*
1772 * For output buffers copy the timestamp if needed,
1773 * and the timecode field and flag if needed.
1774 */
Sakari Ailusc57ff792014-03-01 10:28:02 -03001775 if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
1776 V4L2_BUF_FLAG_TIMESTAMP_COPY)
Hans Verkuilf1343282014-02-24 14:44:50 -03001777 vb->v4l2_buf.timestamp = b->timestamp;
1778 vb->v4l2_buf.flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
1779 if (b->flags & V4L2_BUF_FLAG_TIMECODE)
1780 vb->v4l2_buf.timecode = b->timecode;
1781 }
Laurent Pinchart012043b2013-08-09 08:11:26 -03001782
1783 /*
1784 * If already streaming, give the buffer to driver for processing.
1785 * If not, the buffer will be given to driver on next streamon.
1786 */
Hans Verkuilb3379c62014-02-24 13:51:03 -03001787 if (q->start_streaming_called)
Laurent Pinchart012043b2013-08-09 08:11:26 -03001788 __enqueue_in_driver(vb);
1789
Hans Verkuil41381112013-12-13 13:13:39 -03001790 /* Fill buffer information for the userspace */
1791 __fill_v4l2_buffer(vb, b);
Laurent Pinchart012043b2013-08-09 08:11:26 -03001792
Hans Verkuilb3379c62014-02-24 13:51:03 -03001793 /*
1794 * If streamon has been called, and we haven't yet called
1795 * start_streaming() since not enough buffers were queued, and
1796 * we now have reached the minimum number of queued buffers,
1797 * then we can finally call start_streaming().
1798 */
1799 if (q->streaming && !q->start_streaming_called &&
1800 q->queued_count >= q->min_buffers_needed) {
Hans Verkuil02f142e2013-12-13 13:13:42 -03001801 ret = vb2_start_streaming(q);
1802 if (ret)
1803 return ret;
1804 }
1805
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001806 dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);
Hans Verkuil41381112013-12-13 13:13:39 -03001807 return 0;
Pawel Osciake23ccc02010-10-11 10:56:41 -03001808}
Hans Verkuilb2f2f042013-12-13 13:13:41 -03001809
1810/**
1811 * vb2_qbuf() - Queue a buffer from userspace
1812 * @q: videobuf2 queue
1813 * @b: buffer structure passed from userspace to vidioc_qbuf handler
1814 * in driver
1815 *
1816 * Should be called from vidioc_qbuf ioctl handler of a driver.
1817 * This function:
1818 * 1) verifies the passed buffer,
1819 * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
1820 * which driver-specific buffer initialization can be performed,
 1821 * 3) if streaming is on, queues the buffer in the driver by means of the buf_queue
1822 * callback for processing.
1823 *
1824 * The return values from this function are intended to be directly returned
1825 * from vidioc_qbuf handler in driver.
1826 */
1827int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
1828{
1829 if (q->fileio) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001830 dprintk(1, "file io in progress\n");
Hans Verkuilb2f2f042013-12-13 13:13:41 -03001831 return -EBUSY;
1832 }
1833
1834 return vb2_internal_qbuf(q, b);
1835}
Pawel Osciake23ccc02010-10-11 10:56:41 -03001836EXPORT_SYMBOL_GPL(vb2_qbuf);
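
/*
 * Example (sketch): a vidioc_qbuf handler built on top of vb2_qbuf().
 * struct my_dev, its 'queue' member and my_vidioc_qbuf are hypothetical
 * names used only for illustration.
 *
 *	static int my_vidioc_qbuf(struct file *file, void *priv,
 *				  struct v4l2_buffer *b)
 *	{
 *		struct my_dev *dev = video_drvdata(file);
 *
 *		return vb2_qbuf(&dev->queue, b);
 *	}
 */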
1837
1838/**
1839 * __vb2_wait_for_done_vb() - wait for a buffer to become available
1840 * for dequeuing
1841 *
 1842 * Will sleep if required when nonblocking == false.
1843 */
1844static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
1845{
1846 /*
1847 * All operations on vb_done_list are performed under done_lock
1848 * spinlock protection. However, buffers may be removed from
1849 * it and returned to userspace only while holding both driver's
1850 * lock and the done_lock spinlock. Thus we can be sure that as
1851 * long as we hold the driver's lock, the list will remain not
1852 * empty if list_empty() check succeeds.
1853 */
1854
1855 for (;;) {
1856 int ret;
1857
1858 if (!q->streaming) {
1859 dprintk(1, "Streaming off, will not wait for buffers\n");
1860 return -EINVAL;
1861 }
1862
1863 if (!list_empty(&q->done_list)) {
1864 /*
1865 * Found a buffer that we were waiting for.
1866 */
1867 break;
1868 }
1869
1870 if (nonblocking) {
1871 dprintk(1, "Nonblocking and no buffers to dequeue, "
1872 "will not wait\n");
1873 return -EAGAIN;
1874 }
1875
1876 /*
1877 * We are streaming and blocking, wait for another buffer to
1878 * become ready or for streamoff. Driver's lock is released to
1879 * allow streamoff or qbuf to be called while waiting.
1880 */
Hans Verkuila1d36d82014-03-17 09:54:21 -03001881 call_void_qop(q, wait_prepare, q);
Pawel Osciake23ccc02010-10-11 10:56:41 -03001882
1883 /*
1884 * All locks have been released, it is safe to sleep now.
1885 */
1886 dprintk(3, "Will sleep waiting for buffers\n");
1887 ret = wait_event_interruptible(q->done_wq,
1888 !list_empty(&q->done_list) || !q->streaming);
1889
1890 /*
1891 * We need to reevaluate both conditions again after reacquiring
1892 * the locks or return an error if one occurred.
1893 */
Hans Verkuila1d36d82014-03-17 09:54:21 -03001894 call_void_qop(q, wait_finish, q);
Hans Verkuil32a77262012-09-28 06:12:53 -03001895 if (ret) {
1896 dprintk(1, "Sleep was interrupted\n");
Pawel Osciake23ccc02010-10-11 10:56:41 -03001897 return ret;
Hans Verkuil32a77262012-09-28 06:12:53 -03001898 }
Pawel Osciake23ccc02010-10-11 10:56:41 -03001899 }
1900 return 0;
1901}
1902
1903/**
1904 * __vb2_get_done_vb() - get a buffer ready for dequeuing
1905 *
 1906 * Will sleep if required when nonblocking == false.
1907 */
1908static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
Hans Verkuil32a77262012-09-28 06:12:53 -03001909 struct v4l2_buffer *b, int nonblocking)
Pawel Osciake23ccc02010-10-11 10:56:41 -03001910{
1911 unsigned long flags;
1912 int ret;
1913
1914 /*
1915 * Wait for at least one buffer to become available on the done_list.
1916 */
1917 ret = __vb2_wait_for_done_vb(q, nonblocking);
1918 if (ret)
1919 return ret;
1920
1921 /*
1922 * Driver's lock has been held since we last verified that done_list
1923 * is not empty, so no need for another list_empty(done_list) check.
1924 */
1925 spin_lock_irqsave(&q->done_lock, flags);
1926 *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
Hans Verkuil32a77262012-09-28 06:12:53 -03001927 /*
1928 * Only remove the buffer from done_list if v4l2_buffer can handle all
1929 * the planes.
1930 */
1931 ret = __verify_planes_array(*vb, b);
1932 if (!ret)
1933 list_del(&(*vb)->done_entry);
Pawel Osciake23ccc02010-10-11 10:56:41 -03001934 spin_unlock_irqrestore(&q->done_lock, flags);
1935
Hans Verkuil32a77262012-09-28 06:12:53 -03001936 return ret;
Pawel Osciake23ccc02010-10-11 10:56:41 -03001937}
1938
1939/**
1940 * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
1941 * @q: videobuf2 queue
1942 *
1943 * This function will wait until all buffers that have been given to the driver
1944 * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call
1945 * wait_prepare, wait_finish pair. It is intended to be called with all locks
1946 * taken, for example from stop_streaming() callback.
1947 */
1948int vb2_wait_for_all_buffers(struct vb2_queue *q)
1949{
1950 if (!q->streaming) {
1951 dprintk(1, "Streaming off, will not wait for buffers\n");
1952 return -EINVAL;
1953 }
1954
Hans Verkuilb3379c62014-02-24 13:51:03 -03001955 if (q->start_streaming_called)
Hans Verkuil6ea3b982014-02-06 05:46:11 -03001956 wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
Pawel Osciake23ccc02010-10-11 10:56:41 -03001957 return 0;
1958}
1959EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
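
/*
 * Example (sketch): a stop_streaming callback that tells the hardware to
 * stop and then waits until the interrupt handler has returned all
 * in-flight buffers with vb2_buffer_done().  struct my_dev and
 * my_hw_stop_dma() are hypothetical; vb2_get_drv_priv() returns the
 * queue's drv_priv pointer.
 *
 *	static int my_stop_streaming(struct vb2_queue *q)
 *	{
 *		struct my_dev *dev = vb2_get_drv_priv(q);
 *
 *		my_hw_stop_dma(dev);
 *		vb2_wait_for_all_buffers(q);
 *		return 0;
 *	}
 */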
1960
1961/**
Sumit Semwalc5384042012-06-14 10:37:37 -03001962 * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
1963 */
1964static void __vb2_dqbuf(struct vb2_buffer *vb)
1965{
1966 struct vb2_queue *q = vb->vb2_queue;
1967 unsigned int i;
1968
1969 /* nothing to do if the buffer is already dequeued */
1970 if (vb->state == VB2_BUF_STATE_DEQUEUED)
1971 return;
1972
1973 vb->state = VB2_BUF_STATE_DEQUEUED;
1974
1975 /* unmap DMABUF buffer */
1976 if (q->memory == V4L2_MEMORY_DMABUF)
1977 for (i = 0; i < vb->num_planes; ++i) {
1978 if (!vb->planes[i].dbuf_mapped)
1979 continue;
Hans Verkuila1d36d82014-03-17 09:54:21 -03001980 call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
Sumit Semwalc5384042012-06-14 10:37:37 -03001981 vb->planes[i].dbuf_mapped = 0;
1982 }
1983}
1984
Hans Verkuilb2f2f042013-12-13 13:13:41 -03001985static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
Pawel Osciake23ccc02010-10-11 10:56:41 -03001986{
1987 struct vb2_buffer *vb = NULL;
1988 int ret;
1989
1990 if (b->type != q->type) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03001991 dprintk(1, "invalid buffer type\n");
Pawel Osciake23ccc02010-10-11 10:56:41 -03001992 return -EINVAL;
1993 }
Hans Verkuil32a77262012-09-28 06:12:53 -03001994 ret = __vb2_get_done_vb(q, &vb, b, nonblocking);
1995 if (ret < 0)
Pawel Osciake23ccc02010-10-11 10:56:41 -03001996 return ret;
Pawel Osciake23ccc02010-10-11 10:56:41 -03001997
Pawel Osciake23ccc02010-10-11 10:56:41 -03001998 switch (vb->state) {
1999 case VB2_BUF_STATE_DONE:
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002000 dprintk(3, "Returning done buffer\n");
Pawel Osciake23ccc02010-10-11 10:56:41 -03002001 break;
2002 case VB2_BUF_STATE_ERROR:
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002003 dprintk(3, "Returning done buffer with errors\n");
Pawel Osciake23ccc02010-10-11 10:56:41 -03002004 break;
2005 default:
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002006 dprintk(1, "Invalid buffer state\n");
Pawel Osciake23ccc02010-10-11 10:56:41 -03002007 return -EINVAL;
2008 }
2009
Hans Verkuila1d36d82014-03-17 09:54:21 -03002010 call_void_vb_qop(vb, buf_finish, vb);
Hans Verkuil9cf3c312014-02-28 13:30:48 -03002011
Pawel Osciake23ccc02010-10-11 10:56:41 -03002012 /* Fill buffer information for the userspace */
2013 __fill_v4l2_buffer(vb, b);
2014 /* Remove from videobuf queue */
2015 list_del(&vb->queued_entry);
Hans Verkuilb3379c62014-02-24 13:51:03 -03002016 q->queued_count--;
Sumit Semwalc5384042012-06-14 10:37:37 -03002017 /* go back to dequeued state */
2018 __vb2_dqbuf(vb);
Pawel Osciake23ccc02010-10-11 10:56:41 -03002019
2020 dprintk(1, "dqbuf of buffer %d, with state %d\n",
2021 vb->v4l2_buf.index, vb->state);
2022
Pawel Osciake23ccc02010-10-11 10:56:41 -03002023 return 0;
2024}
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002025
2026/**
2027 * vb2_dqbuf() - Dequeue a buffer to the userspace
2028 * @q: videobuf2 queue
2029 * @b: buffer structure passed from userspace to vidioc_dqbuf handler
2030 * in driver
2031 * @nonblocking: if true, this call will not sleep waiting for a buffer if no
2032 * buffers ready for dequeuing are present. Normally the driver
2033 * would be passing (file->f_flags & O_NONBLOCK) here
2034 *
2035 * Should be called from vidioc_dqbuf ioctl handler of a driver.
2036 * This function:
2037 * 1) verifies the passed buffer,
2038 * 2) calls buf_finish callback in the driver (if provided), in which
2039 * driver can perform any additional operations that may be required before
2040 * returning the buffer to userspace, such as cache sync,
2041 * 3) the buffer struct members are filled with relevant information for
2042 * the userspace.
2043 *
2044 * The return values from this function are intended to be directly returned
2045 * from vidioc_dqbuf handler in driver.
2046 */
2047int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
2048{
2049 if (q->fileio) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002050 dprintk(1, "file io in progress\n");
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002051 return -EBUSY;
2052 }
2053 return vb2_internal_dqbuf(q, b, nonblocking);
2054}
Pawel Osciake23ccc02010-10-11 10:56:41 -03002055EXPORT_SYMBOL_GPL(vb2_dqbuf);
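
/*
 * Example (sketch): a vidioc_dqbuf handler; the nonblocking argument is
 * normally taken from the file's O_NONBLOCK flag.  struct my_dev and
 * my_vidioc_dqbuf are hypothetical names.
 *
 *	static int my_vidioc_dqbuf(struct file *file, void *priv,
 *				   struct v4l2_buffer *b)
 *	{
 *		struct my_dev *dev = video_drvdata(file);
 *
 *		return vb2_dqbuf(&dev->queue, b, file->f_flags & O_NONBLOCK);
 *	}
 */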
2056
2057/**
Pawel Osciake23ccc02010-10-11 10:56:41 -03002058 * __vb2_queue_cancel() - cancel and stop (pause) streaming
2059 *
2060 * Removes all queued buffers from driver's queue and all buffers queued by
2061 * userspace from videobuf's queue. Returns to state after reqbufs.
2062 */
2063static void __vb2_queue_cancel(struct vb2_queue *q)
2064{
2065 unsigned int i;
2066
2067 /*
2068 * Tell driver to stop all transactions and release all queued
2069 * buffers.
2070 */
Hans Verkuilb3379c62014-02-24 13:51:03 -03002071 if (q->start_streaming_called)
Pawel Osciake23ccc02010-10-11 10:56:41 -03002072 call_qop(q, stop_streaming, q);
2073 q->streaming = 0;
Hans Verkuilb3379c62014-02-24 13:51:03 -03002074 q->start_streaming_called = 0;
2075 q->queued_count = 0;
2076
2077 if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
2078 for (i = 0; i < q->num_buffers; ++i)
2079 if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE)
2080 vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
2081 /* Must be zero now */
2082 WARN_ON(atomic_read(&q->owned_by_drv_count));
2083 }
Pawel Osciake23ccc02010-10-11 10:56:41 -03002084
2085 /*
2086 * Remove all buffers from videobuf's list...
2087 */
2088 INIT_LIST_HEAD(&q->queued_list);
2089 /*
2090 * ...and done list; userspace will not receive any buffers it
2091 * has not already dequeued before initiating cancel.
2092 */
2093 INIT_LIST_HEAD(&q->done_list);
Hans Verkuil6ea3b982014-02-06 05:46:11 -03002094 atomic_set(&q->owned_by_drv_count, 0);
Pawel Osciake23ccc02010-10-11 10:56:41 -03002095 wake_up_all(&q->done_wq);
2096
2097 /*
2098 * Reinitialize all buffers for next use.
Hans Verkuil9c0863b2014-03-04 07:34:49 -03002099 * Make sure to call buf_finish for any queued buffers. Normally
2100 * that's done in dqbuf, but that's not going to happen when we
2101 * cancel the whole queue. Note: this code belongs here, not in
2102 * __vb2_dqbuf() since in vb2_internal_dqbuf() there is a critical
2103 * call to __fill_v4l2_buffer() after buf_finish(). That order can't
2104 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
Pawel Osciake23ccc02010-10-11 10:56:41 -03002105 */
Hans Verkuil9c0863b2014-03-04 07:34:49 -03002106 for (i = 0; i < q->num_buffers; ++i) {
2107 struct vb2_buffer *vb = q->bufs[i];
2108
2109 if (vb->state != VB2_BUF_STATE_DEQUEUED) {
2110 vb->state = VB2_BUF_STATE_PREPARED;
Hans Verkuila1d36d82014-03-17 09:54:21 -03002111 call_void_vb_qop(vb, buf_finish, vb);
Hans Verkuil9c0863b2014-03-04 07:34:49 -03002112 }
2113 __vb2_dqbuf(vb);
2114 }
Pawel Osciake23ccc02010-10-11 10:56:41 -03002115}
2116
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002117static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002118{
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002119 int ret;
2120
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002121 if (type != q->type) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002122 dprintk(1, "invalid stream type\n");
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002123 return -EINVAL;
2124 }
2125
2126 if (q->streaming) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002127 dprintk(3, "already streaming\n");
Ricardo Ribaldaf9560352013-11-08 07:08:45 -03002128 return 0;
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002129 }
2130
Ricardo Ribalda548df782014-01-08 05:01:33 -03002131 if (!q->num_buffers) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002132 dprintk(1, "no buffers have been allocated\n");
Ricardo Ribalda548df782014-01-08 05:01:33 -03002133 return -EINVAL;
2134 }
2135
Hans Verkuilb3379c62014-02-24 13:51:03 -03002140 if (q->num_buffers < q->min_buffers_needed) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002141 dprintk(1, "need at least %u allocated buffers\n",
Hans Verkuilb3379c62014-02-24 13:51:03 -03002142 q->min_buffers_needed);
2143 return -EINVAL;
2144 }
Ricardo Ribalda Delgado249f5a52014-01-08 05:01:33 -03002145
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002146 /*
Hans Verkuilb3379c62014-02-24 13:51:03 -03002147 * Tell driver to start streaming provided sufficient buffers
2148 * are available.
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002149 */
Hans Verkuilb3379c62014-02-24 13:51:03 -03002150 if (q->queued_count >= q->min_buffers_needed) {
2151 ret = vb2_start_streaming(q);
2152 if (ret) {
2153 __vb2_queue_cancel(q);
2154 return ret;
2155 }
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002156 }
2157
2158 q->streaming = 1;
2159
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002160 dprintk(3, "successful\n");
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002161 return 0;
2162}
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002163
2164/**
2165 * vb2_streamon - start streaming
2166 * @q: videobuf2 queue
2167 * @type: type argument passed from userspace to vidioc_streamon handler
2168 *
2169 * Should be called from vidioc_streamon handler of a driver.
2170 * This function:
2171 * 1) verifies current state
2172 * 2) passes any previously queued buffers to the driver and starts streaming
2173 *
2174 * The return values from this function are intended to be directly returned
2175 * from vidioc_streamon handler in the driver.
2176 */
2177int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
2178{
2179 if (q->fileio) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002180 dprintk(1, "file io in progress\n");
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002181 return -EBUSY;
2182 }
2183 return vb2_internal_streamon(q, type);
2184}
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002185EXPORT_SYMBOL_GPL(vb2_streamon);
2186
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002187static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
2188{
2189 if (type != q->type) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002190 dprintk(1, "invalid stream type\n");
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002191 return -EINVAL;
2192 }
2193
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002194 /*
2195 * Cancel will pause streaming and remove all buffers from the driver
2196 * and videobuf, effectively returning control over them to userspace.
Hans Verkuil3f1a9a32014-02-25 09:42:45 -03002197 *
2198 * Note that we do this even if q->streaming == 0: if you prepare or
2199 * queue buffers, and then call streamoff without ever having called
2200 * streamon, you would still expect those buffers to be returned to
2201 * their normal dequeued state.
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002202 */
2203 __vb2_queue_cancel(q);
2204
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002205 dprintk(3, "successful\n");
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002206 return 0;
2207}
Marek Szyprowskibd323e22011-08-29 08:51:49 -03002208
2209/**
Pawel Osciake23ccc02010-10-11 10:56:41 -03002210 * vb2_streamoff - stop streaming
2211 * @q: videobuf2 queue
2212 * @type: type argument passed from userspace to vidioc_streamoff handler
2213 *
2214 * Should be called from vidioc_streamoff handler of a driver.
2215 * This function:
2216 * 1) verifies current state,
 2217 * 2) stops streaming and dequeues any queued buffers, including those previously
2218 * passed to the driver (after waiting for the driver to finish).
2219 *
2220 * This call can be used for pausing playback.
2221 * The return values from this function are intended to be directly returned
2222 * from vidioc_streamoff handler in the driver
2223 */
2224int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
2225{
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002226 if (q->fileio) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002227 dprintk(1, "file io in progress\n");
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002228 return -EBUSY;
2229 }
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002230 return vb2_internal_streamoff(q, type);
Pawel Osciake23ccc02010-10-11 10:56:41 -03002231}
2232EXPORT_SYMBOL_GPL(vb2_streamoff);
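
/*
 * Example (sketch): vidioc_streamon/vidioc_streamoff handlers that simply
 * forward to vb2_streamon()/vb2_streamoff().  struct my_dev and the my_*
 * names are hypothetical.
 *
 *	static int my_vidioc_streamon(struct file *file, void *priv,
 *				      enum v4l2_buf_type type)
 *	{
 *		struct my_dev *dev = video_drvdata(file);
 *
 *		return vb2_streamon(&dev->queue, type);
 *	}
 *
 *	static int my_vidioc_streamoff(struct file *file, void *priv,
 *				       enum v4l2_buf_type type)
 *	{
 *		struct my_dev *dev = video_drvdata(file);
 *
 *		return vb2_streamoff(&dev->queue, type);
 *	}
 */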
2233
2234/**
2235 * __find_plane_by_offset() - find plane associated with the given offset off
2236 */
2237static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
2238 unsigned int *_buffer, unsigned int *_plane)
2239{
2240 struct vb2_buffer *vb;
2241 unsigned int buffer, plane;
2242
2243 /*
2244 * Go over all buffers and their planes, comparing the given offset
2245 * with an offset assigned to each plane. If a match is found,
2246 * return its buffer and plane numbers.
2247 */
2248 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
2249 vb = q->bufs[buffer];
2250
2251 for (plane = 0; plane < vb->num_planes; ++plane) {
2252 if (vb->v4l2_planes[plane].m.mem_offset == off) {
2253 *_buffer = buffer;
2254 *_plane = plane;
2255 return 0;
2256 }
2257 }
2258 }
2259
2260 return -EINVAL;
2261}
2262
2263/**
Tomasz Stanislawski83ae7c52012-06-14 11:32:24 -03002264 * vb2_expbuf() - Export a buffer as a file descriptor
2265 * @q: videobuf2 queue
2266 * @eb: export buffer structure passed from userspace to vidioc_expbuf
2267 * handler in driver
2268 *
2269 * The return values from this function are intended to be directly returned
2270 * from vidioc_expbuf handler in driver.
2271 */
2272int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
2273{
2274 struct vb2_buffer *vb = NULL;
2275 struct vb2_plane *vb_plane;
2276 int ret;
2277 struct dma_buf *dbuf;
2278
2279 if (q->memory != V4L2_MEMORY_MMAP) {
2280 dprintk(1, "Queue is not currently set up for mmap\n");
2281 return -EINVAL;
2282 }
2283
2284 if (!q->mem_ops->get_dmabuf) {
2285 dprintk(1, "Queue does not support DMA buffer exporting\n");
2286 return -EINVAL;
2287 }
2288
Philipp Zabelea3aba82013-05-21 05:11:35 -03002289 if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) {
 2290		dprintk(1, "only O_CLOEXEC and access mode flags are supported\n");
Tomasz Stanislawski83ae7c52012-06-14 11:32:24 -03002291 return -EINVAL;
2292 }
2293
2294 if (eb->type != q->type) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002295 dprintk(1, "invalid buffer type\n");
Tomasz Stanislawski83ae7c52012-06-14 11:32:24 -03002296 return -EINVAL;
2297 }
2298
2299 if (eb->index >= q->num_buffers) {
2300 dprintk(1, "buffer index out of range\n");
2301 return -EINVAL;
2302 }
2303
2304 vb = q->bufs[eb->index];
2305
2306 if (eb->plane >= vb->num_planes) {
2307 dprintk(1, "buffer plane out of range\n");
2308 return -EINVAL;
2309 }
2310
2311 vb_plane = &vb->planes[eb->plane];
2312
Hans Verkuila1d36d82014-03-17 09:54:21 -03002313 dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
Tomasz Stanislawski83ae7c52012-06-14 11:32:24 -03002314 if (IS_ERR_OR_NULL(dbuf)) {
2315 dprintk(1, "Failed to export buffer %d, plane %d\n",
2316 eb->index, eb->plane);
2317 return -EINVAL;
2318 }
2319
Philipp Zabelea3aba82013-05-21 05:11:35 -03002320 ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE);
Tomasz Stanislawski83ae7c52012-06-14 11:32:24 -03002321 if (ret < 0) {
2322 dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
2323 eb->index, eb->plane, ret);
2324 dma_buf_put(dbuf);
2325 return ret;
2326 }
2327
2328 dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
2329 eb->index, eb->plane, ret);
2330 eb->fd = ret;
2331
2332 return 0;
2333}
2334EXPORT_SYMBOL_GPL(vb2_expbuf);
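
/*
 * Example (sketch): a vidioc_expbuf handler exporting an MMAP buffer plane
 * as a DMABUF file descriptor.  struct my_dev and my_vidioc_expbuf are
 * hypothetical names.
 *
 *	static int my_vidioc_expbuf(struct file *file, void *priv,
 *				    struct v4l2_exportbuffer *eb)
 *	{
 *		struct my_dev *dev = video_drvdata(file);
 *
 *		return vb2_expbuf(&dev->queue, eb);
 *	}
 */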
2335
2336/**
Pawel Osciake23ccc02010-10-11 10:56:41 -03002337 * vb2_mmap() - map video buffers into application address space
2338 * @q: videobuf2 queue
2339 * @vma: vma passed to the mmap file operation handler in the driver
2340 *
2341 * Should be called from mmap file operation handler of a driver.
2342 * This function maps one plane of one of the available video buffers to
 2343 * userspace. To map the whole video memory allocated on reqbufs, this
 2344 * function has to be called once per plane per buffer previously allocated.
 2345 *
 2346 * When the userspace application calls mmap, it passes an offset returned
 2347 * to it earlier by the vidioc_querybuf handler. That offset acts as
2348 * a "cookie", which is then used to identify the plane to be mapped.
2349 * This function finds a plane with a matching offset and a mapping is performed
2350 * by the means of a provided memory operation.
2351 *
2352 * The return values from this function are intended to be directly returned
2353 * from the mmap handler in driver.
2354 */
2355int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
2356{
2357 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
Pawel Osciake23ccc02010-10-11 10:56:41 -03002358 struct vb2_buffer *vb;
2359 unsigned int buffer, plane;
2360 int ret;
Mauro Carvalho Chehab7f841452013-04-19 07:18:01 -03002361 unsigned long length;
Pawel Osciake23ccc02010-10-11 10:56:41 -03002362
2363 if (q->memory != V4L2_MEMORY_MMAP) {
2364 dprintk(1, "Queue is not currently set up for mmap\n");
2365 return -EINVAL;
2366 }
2367
2368 /*
2369 * Check memory area access mode.
2370 */
2371 if (!(vma->vm_flags & VM_SHARED)) {
2372 dprintk(1, "Invalid vma flags, VM_SHARED needed\n");
2373 return -EINVAL;
2374 }
2375 if (V4L2_TYPE_IS_OUTPUT(q->type)) {
2376 if (!(vma->vm_flags & VM_WRITE)) {
2377 dprintk(1, "Invalid vma flags, VM_WRITE needed\n");
2378 return -EINVAL;
2379 }
2380 } else {
2381 if (!(vma->vm_flags & VM_READ)) {
2382 dprintk(1, "Invalid vma flags, VM_READ needed\n");
2383 return -EINVAL;
2384 }
2385 }
2386
2387 /*
2388 * Find the plane corresponding to the offset passed by userspace.
2389 */
2390 ret = __find_plane_by_offset(q, off, &buffer, &plane);
2391 if (ret)
2392 return ret;
2393
2394 vb = q->bufs[buffer];
Pawel Osciake23ccc02010-10-11 10:56:41 -03002395
Mauro Carvalho Chehab7f841452013-04-19 07:18:01 -03002396 /*
2397 * MMAP requires page_aligned buffers.
2398 * The buffer length was page_aligned at __vb2_buf_mem_alloc(),
2399 * so, we need to do the same here.
2400 */
2401 length = PAGE_ALIGN(vb->v4l2_planes[plane].length);
2402 if (length < (vma->vm_end - vma->vm_start)) {
2403 dprintk(1,
2404 "MMAP invalid, as it would overflow buffer length\n");
Seung-Woo Kim068a0df2013-04-11 23:57:57 -03002405 return -EINVAL;
2406 }
2407
Hans Verkuilb5b45412014-01-29 11:53:25 -03002408 ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
Hans Verkuila1d36d82014-03-17 09:54:21 -03002409 if (ret)
Pawel Osciake23ccc02010-10-11 10:56:41 -03002410 return ret;
2411
Pawel Osciake23ccc02010-10-11 10:56:41 -03002412 dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
2413 return 0;
2414}
2415EXPORT_SYMBOL_GPL(vb2_mmap);
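
/*
 * Example (sketch): an mmap file operation that serializes against other
 * queue operations with a driver mutex before calling vb2_mmap().
 * struct my_dev, its 'lock' mutex and my_fop_mmap are hypothetical names.
 *
 *	static int my_fop_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = video_drvdata(file);
 *		int ret;
 *
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;
 *		ret = vb2_mmap(&dev->queue, vma);
 *		mutex_unlock(&dev->lock);
 *		return ret;
 *	}
 */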
2416
Scott Jiang6f524ec2011-09-21 09:25:23 -03002417#ifndef CONFIG_MMU
2418unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
2419 unsigned long addr,
2420 unsigned long len,
2421 unsigned long pgoff,
2422 unsigned long flags)
2423{
2424 unsigned long off = pgoff << PAGE_SHIFT;
2425 struct vb2_buffer *vb;
2426 unsigned int buffer, plane;
2427 int ret;
2428
2429 if (q->memory != V4L2_MEMORY_MMAP) {
2430 dprintk(1, "Queue is not currently set up for mmap\n");
2431 return -EINVAL;
2432 }
2433
2434 /*
2435 * Find the plane corresponding to the offset passed by userspace.
2436 */
2437 ret = __find_plane_by_offset(q, off, &buffer, &plane);
2438 if (ret)
2439 return ret;
2440
2441 vb = q->bufs[buffer];
2442
2443 return (unsigned long)vb2_plane_vaddr(vb, plane);
2444}
2445EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
2446#endif
2447
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002448static int __vb2_init_fileio(struct vb2_queue *q, int read);
2449static int __vb2_cleanup_fileio(struct vb2_queue *q);
Pawel Osciake23ccc02010-10-11 10:56:41 -03002450
2451/**
2452 * vb2_poll() - implements poll userspace operation
2453 * @q: videobuf2 queue
2454 * @file: file argument passed to the poll file operation handler
2455 * @wait: wait argument passed to the poll file operation handler
2456 *
2457 * This function implements poll file operation handler for a driver.
2458 * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
2459 * be informed that the file descriptor of a video device is available for
2460 * reading.
2461 * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
2462 * will be reported as available for writing.
2463 *
Hans Verkuil95213ce2011-07-13 04:26:52 -03002464 * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
2465 * pending events.
2466 *
Pawel Osciake23ccc02010-10-11 10:56:41 -03002467 * The return values from this function are intended to be directly returned
2468 * from poll handler in driver.
2469 */
2470unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
2471{
Hans Verkuil95213ce2011-07-13 04:26:52 -03002472 struct video_device *vfd = video_devdata(file);
Hans Verkuilbf5c7cb2011-07-13 04:01:30 -03002473 unsigned long req_events = poll_requested_events(wait);
Pawel Osciake23ccc02010-10-11 10:56:41 -03002474 struct vb2_buffer *vb = NULL;
Hans Verkuil95213ce2011-07-13 04:26:52 -03002475 unsigned int res = 0;
2476 unsigned long flags;
2477
2478 if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
2479 struct v4l2_fh *fh = file->private_data;
2480
2481 if (v4l2_event_pending(fh))
2482 res = POLLPRI;
2483 else if (req_events & POLLPRI)
2484 poll_wait(file, &fh->wait, wait);
2485 }
Pawel Osciake23ccc02010-10-11 10:56:41 -03002486
Hans Verkuilcd138232013-01-30 13:29:02 -03002487 if (!V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLIN | POLLRDNORM)))
2488 return res;
2489 if (V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLOUT | POLLWRNORM)))
2490 return res;
2491
Pawel Osciake23ccc02010-10-11 10:56:41 -03002492 /*
Pawel Osciak4ffabdb2011-03-20 18:17:34 -03002493 * Start file I/O emulator only if streaming API has not been used yet.
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002494 */
2495 if (q->num_buffers == 0 && q->fileio == NULL) {
Hans Verkuilbf5c7cb2011-07-13 04:01:30 -03002496 if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
2497 (req_events & (POLLIN | POLLRDNORM))) {
Hans Verkuil95213ce2011-07-13 04:26:52 -03002498 if (__vb2_init_fileio(q, 1))
2499 return res | POLLERR;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002500 }
Hans Verkuilbf5c7cb2011-07-13 04:01:30 -03002501 if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
2502 (req_events & (POLLOUT | POLLWRNORM))) {
Hans Verkuil95213ce2011-07-13 04:26:52 -03002503 if (__vb2_init_fileio(q, 0))
2504 return res | POLLERR;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002505 /*
2506 * Write to OUTPUT queue can be done immediately.
2507 */
Hans Verkuil95213ce2011-07-13 04:26:52 -03002508 return res | POLLOUT | POLLWRNORM;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002509 }
2510 }
2511
2512 /*
Pawel Osciake23ccc02010-10-11 10:56:41 -03002513 * There is nothing to wait for if no buffers have already been queued.
2514 */
2515 if (list_empty(&q->queued_list))
Hans Verkuil95213ce2011-07-13 04:26:52 -03002516 return res | POLLERR;
Pawel Osciake23ccc02010-10-11 10:56:41 -03002517
Seung-Woo Kim412cb872013-05-20 23:47:29 -03002518 if (list_empty(&q->done_list))
2519 poll_wait(file, &q->done_wq, wait);
Pawel Osciake23ccc02010-10-11 10:56:41 -03002520
2521 /*
2522 * Take first buffer available for dequeuing.
2523 */
2524 spin_lock_irqsave(&q->done_lock, flags);
2525 if (!list_empty(&q->done_list))
2526 vb = list_first_entry(&q->done_list, struct vb2_buffer,
2527 done_entry);
2528 spin_unlock_irqrestore(&q->done_lock, flags);
2529
2530 if (vb && (vb->state == VB2_BUF_STATE_DONE
2531 || vb->state == VB2_BUF_STATE_ERROR)) {
Hans Verkuil95213ce2011-07-13 04:26:52 -03002532 return (V4L2_TYPE_IS_OUTPUT(q->type)) ?
2533 res | POLLOUT | POLLWRNORM :
2534 res | POLLIN | POLLRDNORM;
Pawel Osciake23ccc02010-10-11 10:56:41 -03002535 }
Hans Verkuil95213ce2011-07-13 04:26:52 -03002536 return res;
Pawel Osciake23ccc02010-10-11 10:56:41 -03002537}
2538EXPORT_SYMBOL_GPL(vb2_poll);
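
/*
 * Example (sketch): a poll file operation wrapping vb2_poll() with the
 * driver lock held, so that it cannot race with qbuf/dqbuf/streamoff.
 * struct my_dev, its 'lock' mutex and my_fop_poll are hypothetical names.
 *
 *	static unsigned int my_fop_poll(struct file *file, poll_table *wait)
 *	{
 *		struct my_dev *dev = video_drvdata(file);
 *		unsigned int res;
 *
 *		mutex_lock(&dev->lock);
 *		res = vb2_poll(&dev->queue, file, wait);
 *		mutex_unlock(&dev->lock);
 *		return res;
 *	}
 */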
2539
2540/**
2541 * vb2_queue_init() - initialize a videobuf2 queue
2542 * @q: videobuf2 queue; this structure should be allocated in driver
2543 *
2544 * The vb2_queue structure should be allocated by the driver. The driver is
2545 * responsible of clearing it's content and setting initial values for some
2546 * required entries before calling this function.
2547 * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
2548 * to the struct vb2_queue description in include/media/videobuf2-core.h
2549 * for more information.
2550 */
2551int vb2_queue_init(struct vb2_queue *q)
2552{
Ezequiel Garcia896f38f2012-09-17 14:59:30 -03002553 /*
2554 * Sanity check
2555 */
2556 if (WARN_ON(!q) ||
2557 WARN_ON(!q->ops) ||
2558 WARN_ON(!q->mem_ops) ||
2559 WARN_ON(!q->type) ||
2560 WARN_ON(!q->io_modes) ||
2561 WARN_ON(!q->ops->queue_setup) ||
Kamil Debski6aa69f92013-01-25 06:29:57 -03002562 WARN_ON(!q->ops->buf_queue) ||
Sakari Ailus872484c2013-08-25 17:57:03 -03002563 WARN_ON(q->timestamp_flags &
2564 ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
2565 V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
Ezequiel Garcia896f38f2012-09-17 14:59:30 -03002566 return -EINVAL;
Pawel Osciake23ccc02010-10-11 10:56:41 -03002567
Kamil Debski6aa69f92013-01-25 06:29:57 -03002568 /* Warn that the driver should choose an appropriate timestamp type */
Sakari Ailusc57ff792014-03-01 10:28:02 -03002569 WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
2570 V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);
Kamil Debski6aa69f92013-01-25 06:29:57 -03002571
Pawel Osciake23ccc02010-10-11 10:56:41 -03002572 INIT_LIST_HEAD(&q->queued_list);
2573 INIT_LIST_HEAD(&q->done_list);
2574 spin_lock_init(&q->done_lock);
2575 init_waitqueue_head(&q->done_wq);
2576
2577 if (q->buf_struct_size == 0)
2578 q->buf_struct_size = sizeof(struct vb2_buffer);
2579
2580 return 0;
2581}
2582EXPORT_SYMBOL_GPL(vb2_queue_init);
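
/*
 * Example (sketch): minimal queue setup as a driver could do it in its
 * probe() or open() path.  struct my_dev, struct my_buffer and my_vb2_ops
 * are hypothetical; my_vb2_ops must provide at least the queue_setup and
 * buf_queue callbacks.  The vmalloc allocator is picked here purely as an
 * example of a mem_ops implementation.
 *
 *	static int my_init_vb2_queue(struct my_dev *dev)
 *	{
 *		struct vb2_queue *q = &dev->queue;
 *
 *		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
 *		q->drv_priv = dev;
 *		q->buf_struct_size = sizeof(struct my_buffer);
 *		q->ops = &my_vb2_ops;
 *		q->mem_ops = &vb2_vmalloc_memops;
 *		q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *
 *		return vb2_queue_init(q);
 *	}
 */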
2583
2584/**
2585 * vb2_queue_release() - stop streaming, release the queue and free memory
2586 * @q: videobuf2 queue
2587 *
2588 * This function stops streaming and performs necessary clean ups, including
2589 * freeing video buffer memory. The driver is responsible for freeing
2590 * the vb2_queue structure itself.
2591 */
2592void vb2_queue_release(struct vb2_queue *q)
2593{
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002594 __vb2_cleanup_fileio(q);
Pawel Osciake23ccc02010-10-11 10:56:41 -03002595 __vb2_queue_cancel(q);
Guennadi Liakhovetski2d864012011-09-28 09:23:02 -03002596 __vb2_queue_free(q, q->num_buffers);
Pawel Osciake23ccc02010-10-11 10:56:41 -03002597}
2598EXPORT_SYMBOL_GPL(vb2_queue_release);
2599
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002600/**
2601 * struct vb2_fileio_buf - buffer context used by file io emulator
2602 *
2603 * vb2 provides a compatibility layer and emulator of file io (read and
2604 * write) calls on top of streaming API. This structure is used for
2605 * tracking context related to the buffers.
2606 */
2607struct vb2_fileio_buf {
2608 void *vaddr;
2609 unsigned int size;
2610 unsigned int pos;
2611 unsigned int queued:1;
2612};
2613
2614/**
2615 * struct vb2_fileio_data - queue context used by file io emulator
2616 *
Hans Verkuil4e5a4d82014-02-14 06:46:50 -03002617 * @cur_index: the index of the buffer currently being read from or
2618 * written to. If equal to q->num_buffers then a new buffer
2619 * must be dequeued.
2620 * @initial_index: in the read() case all buffers are queued up immediately
2621 * in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
2622 * buffers. However, in the write() case no buffers are initially
2623 * queued, instead whenever a buffer is full it is queued up by
2624 * __vb2_perform_fileio(). Only once all available buffers have
2625 * been queued up will __vb2_perform_fileio() start to dequeue
2626 * buffers. This means that initially __vb2_perform_fileio()
2627 * needs to know what buffer index to use when it is queuing up
2628 * the buffers for the first time. That initial index is stored
2629 * in this field. Once it is equal to q->num_buffers all
2630 * available buffers have been queued and __vb2_perform_fileio()
2631 * should start the normal dequeue/queue cycle.
2632 *
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002633 * vb2 provides a compatibility layer and emulator of file io (read and
 2634 * write) calls on top of the streaming API. For proper operation it requires
2635 * this structure to save the driver state between each call of the read
2636 * or write function.
2637 */
2638struct vb2_fileio_data {
2639 struct v4l2_requestbuffers req;
2640 struct v4l2_buffer b;
2641 struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];
Hans Verkuil4e5a4d82014-02-14 06:46:50 -03002642 unsigned int cur_index;
2643 unsigned int initial_index;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002644 unsigned int q_count;
2645 unsigned int dq_count;
2646 unsigned int flags;
2647};
2648
2649/**
2650 * __vb2_init_fileio() - initialize file io emulator
2651 * @q: videobuf2 queue
2652 * @read: mode selector (1 means read, 0 means write)
2653 */
2654static int __vb2_init_fileio(struct vb2_queue *q, int read)
2655{
2656 struct vb2_fileio_data *fileio;
2657 int i, ret;
2658 unsigned int count = 0;
2659
2660 /*
2661 * Sanity check
2662 */
Hans Verkuile4d25812014-02-03 11:22:45 -03002663 if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
2664 (!read && !(q->io_modes & VB2_WRITE))))
2665 return -EINVAL;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002666
2667 /*
2668 * Check if device supports mapping buffers to kernel virtual space.
2669 */
2670 if (!q->mem_ops->vaddr)
2671 return -EBUSY;
2672
2673 /*
 2674 * Check that the streaming API has not already been activated.
2675 */
2676 if (q->streaming || q->num_buffers > 0)
2677 return -EBUSY;
2678
2679 /*
2680 * Start with count 1, driver can increase it in queue_setup()
2681 */
2682 count = 1;
2683
2684 dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
2685 (read) ? "read" : "write", count, q->io_flags);
2686
2687 fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
2688 if (fileio == NULL)
2689 return -ENOMEM;
2690
2691 fileio->flags = q->io_flags;
2692
2693 /*
2694 * Request buffers and use MMAP type to force driver
2695 * to allocate buffers by itself.
2696 */
2697 fileio->req.count = count;
2698 fileio->req.memory = V4L2_MEMORY_MMAP;
2699 fileio->req.type = q->type;
2700 ret = vb2_reqbufs(q, &fileio->req);
2701 if (ret)
2702 goto err_kfree;
2703
2704 /*
2705 * Check if plane_count is correct
2706 * (multiplane buffers are not supported).
2707 */
2708 if (q->bufs[0]->num_planes != 1) {
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002709 ret = -EBUSY;
2710 goto err_reqbufs;
2711 }
2712
2713 /*
2714 * Get kernel address of each buffer.
2715 */
2716 for (i = 0; i < q->num_buffers; i++) {
2717 fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
Wei Yongjun5dd69462013-05-13 01:48:45 -03002718 if (fileio->bufs[i].vaddr == NULL) {
2719 ret = -EINVAL;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002720 goto err_reqbufs;
Wei Yongjun5dd69462013-05-13 01:48:45 -03002721 }
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002722 fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
2723 }
2724
2725 /*
 2726 * Read mode requires pre-queuing of all buffers.
2727 */
2728 if (read) {
2729 /*
2730 * Queue all buffers.
2731 */
2732 for (i = 0; i < q->num_buffers; i++) {
2733 struct v4l2_buffer *b = &fileio->b;
2734 memset(b, 0, sizeof(*b));
2735 b->type = q->type;
2736 b->memory = q->memory;
2737 b->index = i;
2738 ret = vb2_qbuf(q, b);
2739 if (ret)
2740 goto err_reqbufs;
2741 fileio->bufs[i].queued = 1;
2742 }
Hans Verkuil4e5a4d82014-02-14 06:46:50 -03002743 /*
2744 * All buffers have been queued, so mark that by setting
2745 * initial_index to q->num_buffers
2746 */
2747 fileio->initial_index = q->num_buffers;
2748 fileio->cur_index = q->num_buffers;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002749 }
2750
Hans Verkuil02f142e2013-12-13 13:13:42 -03002751 /*
2752 * Start streaming.
2753 */
2754 ret = vb2_streamon(q, q->type);
2755 if (ret)
2756 goto err_reqbufs;
2757
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002758 q->fileio = fileio;
2759
2760 return ret;
2761
2762err_reqbufs:
Hans de Goedea67e1722012-05-08 14:47:39 -03002763 fileio->req.count = 0;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002764 vb2_reqbufs(q, &fileio->req);
2765
2766err_kfree:
2767 kfree(fileio);
2768 return ret;
2769}
2770
2771/**
 2772 * __vb2_cleanup_fileio() - free resources used by the file io emulator
2773 * @q: videobuf2 queue
2774 */
2775static int __vb2_cleanup_fileio(struct vb2_queue *q)
2776{
2777 struct vb2_fileio_data *fileio = q->fileio;
2778
2779 if (fileio) {
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002780 vb2_internal_streamoff(q, q->type);
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002781 q->fileio = NULL;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002782 fileio->req.count = 0;
2783 vb2_reqbufs(q, &fileio->req);
2784 kfree(fileio);
2785 dprintk(3, "file io emulator closed\n");
2786 }
2787 return 0;
2788}
2789
2790/**
2791 * __vb2_perform_fileio() - perform a single file io (read or write) operation
2792 * @q: videobuf2 queue
 2793 * @data: pointer to the target userspace buffer
2794 * @count: number of bytes to read or write
2795 * @ppos: file handle position tracking pointer
 2796 * @nonblock: mode selector (1 means non-blocking calls, 0 means blocking)
2797 * @read: access mode selector (1 means read, 0 means write)
2798 */
2799static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
2800 loff_t *ppos, int nonblock, int read)
2801{
2802 struct vb2_fileio_data *fileio;
2803 struct vb2_fileio_buf *buf;
Hans Verkuilebd7c502014-04-11 04:36:57 -03002804 /*
2805 * When using write() to write data to an output video node the vb2 core
2806 * should set timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
2807 * else is able to provide this information with the write() operation.
2808 */
2809 bool set_timestamp = !read &&
2810 (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
2811 V4L2_BUF_FLAG_TIMESTAMP_COPY;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002812 int ret, index;
2813
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002814 dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002815 read ? "read" : "write", (long)*ppos, count,
2816 nonblock ? "non" : "");
2817
2818 if (!data)
2819 return -EINVAL;
2820
2821 /*
2822 * Initialize emulator on first call.
2823 */
2824 if (!q->fileio) {
2825 ret = __vb2_init_fileio(q, read);
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002826 dprintk(3, "vb2_init_fileio result: %d\n", ret);
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002827 if (ret)
2828 return ret;
2829 }
2830 fileio = q->fileio;
2831
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002832 /*
2833 * Check if we need to dequeue the buffer.
2834 */
Hans Verkuil4e5a4d82014-02-14 06:46:50 -03002835 index = fileio->cur_index;
Hans Verkuil88e26872013-12-13 13:13:45 -03002836 if (index >= q->num_buffers) {
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002837 /*
2838 * Call vb2_dqbuf to get buffer back.
2839 */
2840 memset(&fileio->b, 0, sizeof(fileio->b));
2841 fileio->b.type = q->type;
2842 fileio->b.memory = q->memory;
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002843 ret = vb2_internal_dqbuf(q, &fileio->b, nonblock);
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002844 dprintk(5, "vb2_dqbuf result: %d\n", ret);
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002845 if (ret)
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002846 return ret;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002847 fileio->dq_count += 1;
2848
Hans Verkuil4e5a4d82014-02-14 06:46:50 -03002849 fileio->cur_index = index = fileio->b.index;
Hans Verkuil88e26872013-12-13 13:13:45 -03002850 buf = &fileio->bufs[index];
2851
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002852 /*
2853 * Get number of bytes filled by the driver
2854 */
Hans Verkuil88e26872013-12-13 13:13:45 -03002855 buf->pos = 0;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002856 buf->queued = 0;
Hans Verkuil88e26872013-12-13 13:13:45 -03002857 buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
2858 : vb2_plane_size(q->bufs[index], 0);
2859 } else {
2860 buf = &fileio->bufs[index];
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002861 }
2862
2863 /*
 2864 * Limit count to the number of bytes remaining in the buffer.
2865 */
2866 if (buf->pos + count > buf->size) {
2867 count = buf->size - buf->pos;
Mauro Carvalho Chehab08b99e22011-01-11 17:12:34 -03002868 dprintk(5, "reducing read count: %zd\n", count);
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002869 }
2870
2871 /*
2872 * Transfer data to userspace.
2873 */
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002874 dprintk(3, "copying %zd bytes - buffer %d, offset %u\n",
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002875 count, index, buf->pos);
2876 if (read)
2877 ret = copy_to_user(data, buf->vaddr + buf->pos, count);
2878 else
2879 ret = copy_from_user(buf->vaddr + buf->pos, data, count);
2880 if (ret) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002881 dprintk(3, "error copying data\n");
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002882 return -EFAULT;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002883 }
2884
2885 /*
2886 * Update counters.
2887 */
2888 buf->pos += count;
2889 *ppos += count;
2890
2891 /*
2892 * Queue next buffer if required.
2893 */
2894 if (buf->pos == buf->size ||
2895 (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) {
2896 /*
2897 * Check if this is the last buffer to read.
2898 */
2899 if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
2900 fileio->dq_count == 1) {
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002901 dprintk(3, "read limit reached\n");
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002902 return __vb2_cleanup_fileio(q);
2903 }
2904
2905 /*
2906 * Call vb2_qbuf and give buffer to the driver.
2907 */
2908 memset(&fileio->b, 0, sizeof(fileio->b));
2909 fileio->b.type = q->type;
2910 fileio->b.memory = q->memory;
2911 fileio->b.index = index;
2912 fileio->b.bytesused = buf->pos;
Hans Verkuilebd7c502014-04-11 04:36:57 -03002913 if (set_timestamp)
2914 v4l2_get_timestamp(&fileio->b.timestamp);
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002915 ret = vb2_internal_qbuf(q, &fileio->b);
Hans Verkuilfd4354c2014-04-07 09:08:47 -03002916	dprintk(5, "vb2_qbuf result: %d\n", ret);
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002917 if (ret)
Hans Verkuilb2f2f042013-12-13 13:13:41 -03002918 return ret;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002919
2920 /*
2921 * Buffer has been queued, update the status
2922 */
2923 buf->pos = 0;
2924 buf->queued = 1;
Hans Verkuil88e26872013-12-13 13:13:45 -03002925 buf->size = vb2_plane_size(q->bufs[index], 0);
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002926 fileio->q_count += 1;
Hans Verkuil4e5a4d82014-02-14 06:46:50 -03002927 /*
2928 * If we are queuing up buffers for the first time, then
2929 * increase initial_index by one.
2930 */
2931 if (fileio->initial_index < q->num_buffers)
2932 fileio->initial_index++;
2933 /*
 2934		 * The next buffer to use is either a buffer that's going to be
 2935		 * queued for the first time (initial_index < q->num_buffers), or
 2936		 * it is equal to q->num_buffers, meaning that all the 'first time'
 2937		 * buffers have now been queued up and the next call will have to
 2938		 * dequeue a buffer.
2939 */
2940 fileio->cur_index = fileio->initial_index;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002941 }
2942
2943 /*
2944 * Return proper number of bytes processed.
2945 */
2946 if (ret == 0)
2947 ret = count;
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002948 return ret;
2949}
2950
2951size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
2952 loff_t *ppos, int nonblocking)
2953{
2954 return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
2955}
2956EXPORT_SYMBOL_GPL(vb2_read);
2957
Ricardo Ribalda819585b2013-08-28 04:39:29 -03002958size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002959 loff_t *ppos, int nonblocking)
2960{
Ricardo Ribalda819585b2013-08-28 04:39:29 -03002961 return __vb2_perform_fileio(q, (char __user *) data, count,
2962 ppos, nonblocking, 0);
Marek Szyprowskib25748f2010-12-06 05:56:55 -03002963}
2964EXPORT_SYMBOL_GPL(vb2_write);
2965
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03002966
2967/*
2968 * The following functions are not part of the vb2 core API, but are helper
2969 * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
2970 * and struct vb2_ops.
2971 * They contain boilerplate code that most if not all drivers have to do
2972 * and so they simplify the driver code.
2973 */
2974
 2975/* The queue is busy if there is an owner and you are not that owner. */
2976static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
2977{
2978 return vdev->queue->owner && vdev->queue->owner != file->private_data;
2979}
2980
2981/* vb2 ioctl helpers */
2982
2983int vb2_ioctl_reqbufs(struct file *file, void *priv,
2984 struct v4l2_requestbuffers *p)
2985{
2986 struct video_device *vdev = video_devdata(file);
2987 int res = __verify_memory_type(vdev->queue, p->memory, p->type);
2988
2989 if (res)
2990 return res;
2991 if (vb2_queue_is_busy(vdev, file))
2992 return -EBUSY;
2993 res = __reqbufs(vdev->queue, p);
 2994	/* If count == 0, then the owner has released all buffers and is
 2995	   no longer the owner of the queue. Otherwise we have a new owner. */
2996 if (res == 0)
2997 vdev->queue->owner = p->count ? file->private_data : NULL;
2998 return res;
2999}
3000EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
3001
3002int vb2_ioctl_create_bufs(struct file *file, void *priv,
3003 struct v4l2_create_buffers *p)
3004{
3005 struct video_device *vdev = video_devdata(file);
3006 int res = __verify_memory_type(vdev->queue, p->memory, p->format.type);
3007
3008 p->index = vdev->queue->num_buffers;
3009 /* If count == 0, then just check if memory and type are valid.
3010 Any -EBUSY result from __verify_memory_type can be mapped to 0. */
3011 if (p->count == 0)
3012 return res != -EBUSY ? res : 0;
3013 if (res)
3014 return res;
3015 if (vb2_queue_is_busy(vdev, file))
3016 return -EBUSY;
3017 res = __create_bufs(vdev->queue, p);
3018 if (res == 0)
3019 vdev->queue->owner = file->private_data;
3020 return res;
3021}
3022EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
3023
3024int vb2_ioctl_prepare_buf(struct file *file, void *priv,
3025 struct v4l2_buffer *p)
3026{
3027 struct video_device *vdev = video_devdata(file);
3028
3029 if (vb2_queue_is_busy(vdev, file))
3030 return -EBUSY;
3031 return vb2_prepare_buf(vdev->queue, p);
3032}
3033EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
3034
3035int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
3036{
3037 struct video_device *vdev = video_devdata(file);
3038
3039 /* No need to call vb2_queue_is_busy(), anyone can query buffers. */
3040 return vb2_querybuf(vdev->queue, p);
3041}
3042EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);
3043
3044int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
3045{
3046 struct video_device *vdev = video_devdata(file);
3047
3048 if (vb2_queue_is_busy(vdev, file))
3049 return -EBUSY;
3050 return vb2_qbuf(vdev->queue, p);
3051}
3052EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
3053
3054int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
3055{
3056 struct video_device *vdev = video_devdata(file);
3057
3058 if (vb2_queue_is_busy(vdev, file))
3059 return -EBUSY;
3060 return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
3061}
3062EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);
3063
3064int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
3065{
3066 struct video_device *vdev = video_devdata(file);
3067
3068 if (vb2_queue_is_busy(vdev, file))
3069 return -EBUSY;
3070 return vb2_streamon(vdev->queue, i);
3071}
3072EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);
3073
3074int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
3075{
3076 struct video_device *vdev = video_devdata(file);
3077
3078 if (vb2_queue_is_busy(vdev, file))
3079 return -EBUSY;
3080 return vb2_streamoff(vdev->queue, i);
3081}
3082EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
3083
Tomasz Stanislawski83ae7c52012-06-14 11:32:24 -03003084int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
3085{
3086 struct video_device *vdev = video_devdata(file);
3087
3088 if (vb2_queue_is_busy(vdev, file))
3089 return -EBUSY;
3090 return vb2_expbuf(vdev->queue, p);
3091}
3092EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
3093
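/*
 * Illustrative sketch only (not compiled): how a driver would typically
 * plug the vb2_ioctl_* helpers above into its struct v4l2_ioctl_ops. The
 * my_querycap and my_g_fmt callbacks are hypothetical driver functions
 * used only for illustration.
 */
#if 0
static const struct v4l2_ioctl_ops my_ioctl_ops = {
	.vidioc_querycap	= my_querycap,
	.vidioc_g_fmt_vid_cap	= my_g_fmt,
	/* buffer handling is boilerplate, delegated to vb2 */
	.vidioc_reqbufs		= vb2_ioctl_reqbufs,
	.vidioc_create_bufs	= vb2_ioctl_create_bufs,
	.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
	.vidioc_querybuf	= vb2_ioctl_querybuf,
	.vidioc_qbuf		= vb2_ioctl_qbuf,
	.vidioc_dqbuf		= vb2_ioctl_dqbuf,
	.vidioc_expbuf		= vb2_ioctl_expbuf,
	.vidioc_streamon	= vb2_ioctl_streamon,
	.vidioc_streamoff	= vb2_ioctl_streamoff,
};
#endif
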
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003094/* v4l2_file_operations helpers */
3095
3096int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
3097{
3098 struct video_device *vdev = video_devdata(file);
Laurent Pinchart8a90f1a2013-08-02 13:55:21 -03003099 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3100 int err;
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003101
Laurent Pinchart8a90f1a2013-08-02 13:55:21 -03003102 if (lock && mutex_lock_interruptible(lock))
3103 return -ERESTARTSYS;
3104 err = vb2_mmap(vdev->queue, vma);
3105 if (lock)
3106 mutex_unlock(lock);
3107 return err;
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003108}
3109EXPORT_SYMBOL_GPL(vb2_fop_mmap);
3110
Ricardo Ribalda1380f572013-11-25 05:49:02 -03003111int _vb2_fop_release(struct file *file, struct mutex *lock)
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003112{
3113 struct video_device *vdev = video_devdata(file);
3114
3115 if (file->private_data == vdev->queue->owner) {
Ricardo Ribalda1380f572013-11-25 05:49:02 -03003116 if (lock)
3117 mutex_lock(lock);
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003118 vb2_queue_release(vdev->queue);
3119 vdev->queue->owner = NULL;
Ricardo Ribalda1380f572013-11-25 05:49:02 -03003120 if (lock)
3121 mutex_unlock(lock);
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003122 }
3123 return v4l2_fh_release(file);
3124}
Ricardo Ribalda1380f572013-11-25 05:49:02 -03003125EXPORT_SYMBOL_GPL(_vb2_fop_release);
3126
3127int vb2_fop_release(struct file *file)
3128{
3129 struct video_device *vdev = video_devdata(file);
3130 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3131
3132 return _vb2_fop_release(file, lock);
3133}
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003134EXPORT_SYMBOL_GPL(vb2_fop_release);
3135
Ricardo Ribalda819585b2013-08-28 04:39:29 -03003136ssize_t vb2_fop_write(struct file *file, const char __user *buf,
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003137 size_t count, loff_t *ppos)
3138{
3139 struct video_device *vdev = video_devdata(file);
3140 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003141 int err = -EBUSY;
3142
Hans Verkuilcf533732012-07-31 04:02:25 -03003143 if (lock && mutex_lock_interruptible(lock))
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003144 return -ERESTARTSYS;
3145 if (vb2_queue_is_busy(vdev, file))
3146 goto exit;
3147 err = vb2_write(vdev->queue, buf, count, ppos,
3148 file->f_flags & O_NONBLOCK);
Hans Verkuil8c82c752012-09-07 12:50:02 -03003149 if (vdev->queue->fileio)
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003150 vdev->queue->owner = file->private_data;
3151exit:
Hans Verkuilcf533732012-07-31 04:02:25 -03003152 if (lock)
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003153 mutex_unlock(lock);
3154 return err;
3155}
3156EXPORT_SYMBOL_GPL(vb2_fop_write);
3157
3158ssize_t vb2_fop_read(struct file *file, char __user *buf,
3159 size_t count, loff_t *ppos)
3160{
3161 struct video_device *vdev = video_devdata(file);
3162 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003163 int err = -EBUSY;
3164
Hans Verkuilcf533732012-07-31 04:02:25 -03003165 if (lock && mutex_lock_interruptible(lock))
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003166 return -ERESTARTSYS;
3167 if (vb2_queue_is_busy(vdev, file))
3168 goto exit;
3169 err = vb2_read(vdev->queue, buf, count, ppos,
3170 file->f_flags & O_NONBLOCK);
Hans Verkuil8c82c752012-09-07 12:50:02 -03003171 if (vdev->queue->fileio)
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003172 vdev->queue->owner = file->private_data;
3173exit:
Hans Verkuilcf533732012-07-31 04:02:25 -03003174 if (lock)
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003175 mutex_unlock(lock);
3176 return err;
3177}
3178EXPORT_SYMBOL_GPL(vb2_fop_read);
3179
3180unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
3181{
3182 struct video_device *vdev = video_devdata(file);
3183 struct vb2_queue *q = vdev->queue;
3184 struct mutex *lock = q->lock ? q->lock : vdev->lock;
3185 unsigned long req_events = poll_requested_events(wait);
3186 unsigned res;
3187 void *fileio;
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003188 bool must_lock = false;
3189
3190 /* Try to be smart: only lock if polling might start fileio,
3191 otherwise locking will only introduce unwanted delays. */
3192 if (q->num_buffers == 0 && q->fileio == NULL) {
3193 if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
3194 (req_events & (POLLIN | POLLRDNORM)))
3195 must_lock = true;
3196 else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
3197 (req_events & (POLLOUT | POLLWRNORM)))
3198 must_lock = true;
3199 }
3200
3201 /* If locking is needed, but this helper doesn't know how, then you
3202 shouldn't be using this helper but you should write your own. */
Hans Verkuilcf533732012-07-31 04:02:25 -03003203 WARN_ON(must_lock && !lock);
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003204
Hans Verkuilcf533732012-07-31 04:02:25 -03003205 if (must_lock && lock && mutex_lock_interruptible(lock))
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003206 return POLLERR;
3207
3208 fileio = q->fileio;
3209
3210 res = vb2_poll(vdev->queue, file, wait);
3211
3212 /* If fileio was started, then we have a new queue owner. */
3213 if (must_lock && !fileio && q->fileio)
3214 q->owner = file->private_data;
Hans Verkuilcf533732012-07-31 04:02:25 -03003215 if (must_lock && lock)
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003216 mutex_unlock(lock);
3217 return res;
3218}
3219EXPORT_SYMBOL_GPL(vb2_fop_poll);
3220
3221#ifndef CONFIG_MMU
3222unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
3223 unsigned long len, unsigned long pgoff, unsigned long flags)
3224{
3225 struct video_device *vdev = video_devdata(file);
Laurent Pinchart8a90f1a2013-08-02 13:55:21 -03003226 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3227 int ret;
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003228
Laurent Pinchart8a90f1a2013-08-02 13:55:21 -03003229 if (lock && mutex_lock_interruptible(lock))
3230 return -ERESTARTSYS;
3231 ret = vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
3232 if (lock)
3233 mutex_unlock(lock);
3234 return ret;
Hans Verkuil4c1ffca2012-07-02 05:59:18 -03003235}
3236EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
3237#endif
3238
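/*
 * Illustrative sketch only (not compiled): a typical struct
 * v4l2_file_operations built from the vb2_fop_* helpers above. It assumes
 * that vdev->queue is set and that either vdev->queue->lock or vdev->lock
 * is provided so the helpers can serialize access.
 */
#if 0
static const struct v4l2_file_operations my_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.read		= vb2_fop_read,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
	.unlocked_ioctl	= video_ioctl2,
};
#endif
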
3239/* vb2_ops helpers. Only use if vq->lock is non-NULL. */
3240
3241void vb2_ops_wait_prepare(struct vb2_queue *vq)
3242{
3243 mutex_unlock(vq->lock);
3244}
3245EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
3246
3247void vb2_ops_wait_finish(struct vb2_queue *vq)
3248{
3249 mutex_lock(vq->lock);
3250}
3251EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
3252
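/*
 * Illustrative sketch only (not compiled): a driver that sets vq->lock can
 * use the two helpers above in its struct vb2_ops instead of writing its
 * own wait_prepare/wait_finish. The my_* callbacks are hypothetical driver
 * functions used only for illustration.
 */
#if 0
static const struct vb2_ops my_vb2_qops = {
	.queue_setup		= my_queue_setup,
	.buf_prepare		= my_buf_prepare,
	.buf_queue		= my_buf_queue,
	.start_streaming	= my_start_streaming,
	.stop_streaming		= my_stop_streaming,
	/* release/reacquire vq->lock while waiting for buffers */
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
};
#endif
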
Pawel Osciake23ccc02010-10-11 10:56:41 -03003253MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
Pawel Osciak95072082011-03-13 15:23:32 -03003254MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
Pawel Osciake23ccc02010-10-11 10:56:41 -03003255MODULE_LICENSE("GPL");