/*
 * ispqueue.c
 *
 * TI OMAP3 ISP - Video buffers queue handling
 *
 * Copyright (C) 2010 Nokia Corporation
 *
 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/omap-iommu.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "isp.h"
#include "ispqueue.h"
#include "ispvideo.h"

/* -----------------------------------------------------------------------------
 * IOMMU management
 */

#define IOMMU_FLAG	(IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)

/*
 * ispmmu_vmap - Wrapper for virtual memory mapping of a scatter gather table
 * @isp: Device pointer specific to the OMAP3 ISP.
 * @sgt: Pointer to source scatter gather table.
 *
 * Return the device address mapped by the ISP MMU, or -ENOMEM if we ran out
 * of memory.
 */
static dma_addr_t
ispmmu_vmap(struct isp_device *isp, const struct sg_table *sgt)
{
	return omap_iommu_vmap(isp->domain, isp->dev, 0, sgt, IOMMU_FLAG);
}

/*
 * ispmmu_vunmap - Unmap a device address from the ISP MMU
 * @isp: Device pointer specific to the OMAP3 ISP.
 * @da: Device address generated from an ispmmu_vmap call.
 */
static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
{
	omap_iommu_vunmap(isp->domain, isp->dev, (u32)da);
}

/* -----------------------------------------------------------------------------
 * Video buffers management
 */

/*
 * isp_video_buffer_cache_sync - Keep the buffers coherent between CPU and ISP
 *
 * The typical operation required here is cache invalidation across the
 * (user space) buffer address range, and this _must_ be done at QBUF stage
 * (and *only* at QBUF).
 *
 * We try to use the optimal cache invalidation function:
 *
 * - dmac_map_area:
 *    - used when the number of pages is _low_.
 *    - it becomes quite slow as the number of pages increases.
 *    - for a 648x492 viewfinder (150 pages) it takes 1.3 ms.
 *    - for a 5 Mpix buffer (2491 pages) it takes between 25-50 ms.
 *
 * - flush_cache_all:
 *    - used when the number of pages is _high_.
 *    - time taken is in the range of 500-900 us.
 *    - has a higher penalty, as the whole dcache + icache is invalidated.
 */
/*
 * FIXME: dmac_inv_range crashes randomly on the user space buffer
 * address. Fall back to flush_cache_all for now.
 */
#define ISP_CACHE_FLUSH_PAGES_MAX	0

static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf)
{
	if (buf->skip_cache)
		return;

	if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
	    buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
		flush_cache_all();
	else {
		dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length,
			      DMA_FROM_DEVICE);
		outer_inv_range(buf->vbuf.m.userptr,
				buf->vbuf.m.userptr + buf->vbuf.length);
	}
}

/*
 * isp_video_buffer_lock_vma - Prevent VMAs from being unmapped
 *
 * Lock the VMAs underlying the given buffer into memory. This prevents the
 * userspace buffer mapping from being swapped out, making VIPT cache handling
 * easier.
 *
 * Note that the pages will not be freed, as the buffers have been locked into
 * memory by a call to get_user_pages(), but the userspace mapping could still
 * disappear if the VMAs are not locked. This is caused by the memory
 * management code trying to be as lock-less as possible, which results in the
 * userspace mapping manager not finding out that the pages are locked under
 * some conditions.
 */
static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock)
{
	struct vm_area_struct *vma;
	unsigned long start;
	unsigned long end;
	int ret = 0;

	if (buf->vbuf.memory == V4L2_MEMORY_MMAP)
		return 0;

	/* We can be called from workqueue context, to unlock the VMAs, if the
	 * current task died. In that case there's no current memory management
	 * context so unlocking can't be performed, but the VMAs have been or
	 * are getting destroyed anyway so it doesn't really matter.
	 */
	if (!current || !current->mm)
		return lock ? -EINVAL : 0;

	start = buf->vbuf.m.userptr;
	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;

	down_write(&current->mm->mmap_sem);
	spin_lock(&current->mm->page_table_lock);

	do {
		vma = find_vma(current->mm, start);
		if (vma == NULL) {
			ret = -EFAULT;
			goto out;
		}

		if (lock)
			vma->vm_flags |= VM_LOCKED;
		else
			vma->vm_flags &= ~VM_LOCKED;

		start = vma->vm_end + 1;
	} while (vma->vm_end < end);

	if (lock)
		buf->vm_flags |= VM_LOCKED;
	else
		buf->vm_flags &= ~VM_LOCKED;

out:
	spin_unlock(&current->mm->page_table_lock);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * isp_video_buffer_prepare_kernel - Build scatter list for a vmalloc'ed buffer
 *
 * Iterate over the vmalloc'ed area and create a scatter list entry for every
 * page.
 */
static int isp_video_buffer_prepare_kernel(struct isp_video_buffer *buf)
{
	struct scatterlist *sg;
	unsigned int npages;
	unsigned int i;
	void *addr;
	int ret;

	addr = buf->vaddr;
	npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT;

	ret = sg_alloc_table(&buf->sgt, npages, GFP_KERNEL);
	if (ret < 0)
		return ret;

	for (sg = buf->sgt.sgl, i = 0; i < npages; ++i, addr += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(addr);

		if (page == NULL || PageHighMem(page)) {
			sg_free_table(&buf->sgt);
			return -EINVAL;
		}

		sg_set_page(sg, page, PAGE_SIZE, 0);
		sg = sg_next(sg);
	}

	return 0;
}

/*
 * isp_video_buffer_cleanup - Release pages for a userspace VMA.
 *
 * Release pages locked by a call to isp_video_buffer_prepare_user and free
 * the pages table.
 */
static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
{
	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
	struct isp_video *video = vfh->video;
	enum dma_data_direction direction;
	unsigned int i;

	if (buf->dma) {
		ispmmu_vunmap(video->isp, buf->dma);
		buf->dma = 0;
	}

	if (!(buf->vm_flags & VM_PFNMAP)) {
		direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			  ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
		dma_unmap_sg(buf->queue->dev, buf->sgt.sgl, buf->sgt.orig_nents,
			     direction);
	}

	sg_free_table(&buf->sgt);

	if (buf->pages != NULL) {
		isp_video_buffer_lock_vma(buf, 0);

		for (i = 0; i < buf->npages; ++i)
			page_cache_release(buf->pages[i]);

		vfree(buf->pages);
		buf->pages = NULL;
	}

	buf->npages = 0;
	buf->skip_cache = false;
}

/*
 * isp_video_buffer_prepare_user - Prepare a userspace buffer.
 *
 * This function creates a scatter list with a 1:1 mapping for a userspace VMA.
 * The number of pages is first computed based on the buffer size, and pages
 * are then retrieved by a call to get_user_pages.
 *
 * Pages are pinned to memory by get_user_pages, making them available for DMA
 * transfers. However, due to memory management optimizations, it seems that
 * get_user_pages doesn't guarantee that the pinned pages will not be written
 * to swap and removed from the userspace mapping(s). When this happens, a page
 * fault can be generated when accessing those unmapped pages.
 *
 * If the fault is triggered by a page table walk caused by VIPT cache
 * management operations, the page fault handler might oops if the MM semaphore
 * is held, as it can't handle kernel page faults in that case. To fix that, a
 * fixup entry needs to be added to the cache management code, or the userspace
 * VMA must be locked to avoid removing pages from the userspace mapping in the
 * first place.
 *
 * If the number of pages retrieved is smaller than the number required by the
 * buffer size, the function returns -EFAULT.
 */
static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf)
{
	struct scatterlist *sg;
	unsigned int offset;
	unsigned long data;
	unsigned int first;
	unsigned int last;
	unsigned int i;
	int ret;

	data = buf->vbuf.m.userptr;
	first = (data & PAGE_MASK) >> PAGE_SHIFT;
	last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT;
	offset = data & ~PAGE_MASK;

	buf->npages = last - first + 1;
	buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0]));
	if (buf->pages == NULL)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, data & PAGE_MASK,
			     buf->npages,
			     buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
			     buf->pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret != buf->npages) {
		buf->npages = ret < 0 ? 0 : ret;
		return -EFAULT;
	}

	ret = isp_video_buffer_lock_vma(buf, 1);
	if (ret < 0)
		return ret;

	ret = sg_alloc_table(&buf->sgt, buf->npages, GFP_KERNEL);
	if (ret < 0)
		return ret;

	for (sg = buf->sgt.sgl, i = 0; i < buf->npages; ++i) {
		if (PageHighMem(buf->pages[i])) {
			sg_free_table(&buf->sgt);
			return -EINVAL;
		}

		sg_set_page(sg, buf->pages[i], PAGE_SIZE - offset, offset);
		sg = sg_next(sg);
		offset = 0;
	}

	return 0;
}

/*
 * isp_video_buffer_prepare_pfnmap - Prepare a VM_PFNMAP userspace buffer
 *
 * Userspace VM_PFNMAP buffers are supported only if they are contiguous in
 * memory and if they span a single VMA. Start by validating the user pointer
 * to make sure it fulfils that condition, and then build a scatter list of
 * physically contiguous pages starting at the buffer memory physical address.
 *
 * Return 0 on success, -EFAULT if the buffer isn't valid or -ENOMEM if memory
 * can't be allocated.
 */
static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
{
	struct vm_area_struct *vma;
	struct scatterlist *sg;
	unsigned long prev_pfn;
	unsigned long this_pfn;
	unsigned long start;
	unsigned int offset;
	unsigned long end;
	unsigned long pfn;
	unsigned int i;
	int ret = 0;

	start = buf->vbuf.m.userptr;
	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
	offset = start & ~PAGE_MASK;

	buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	buf->pages = NULL;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, start);
	if (vma == NULL || vma->vm_end < end) {
		ret = -EFAULT;
		goto unlock;
	}

	for (prev_pfn = 0; start <= end; start += PAGE_SIZE) {
		ret = follow_pfn(vma, start, &this_pfn);
		if (ret < 0)
			goto unlock;

		if (prev_pfn == 0)
			pfn = this_pfn;
		else if (this_pfn != prev_pfn + 1) {
			ret = -EFAULT;
			goto unlock;
		}

		prev_pfn = this_pfn;
	}

unlock:
	up_read(&current->mm->mmap_sem);
	if (ret < 0)
		return ret;

	ret = sg_alloc_table(&buf->sgt, buf->npages, GFP_KERNEL);
	if (ret < 0)
		return ret;

	for (sg = buf->sgt.sgl, i = 0; i < buf->npages; ++i, ++pfn) {
		sg_set_page(sg, pfn_to_page(pfn), PAGE_SIZE - offset, offset);
		/* PFNMAP buffers will not get DMA-mapped, set the DMA address
		 * manually.
		 */
		sg_dma_address(sg) = (pfn << PAGE_SHIFT) + offset;
		sg = sg_next(sg);
		offset = 0;
	}

	return 0;
}

/*
 * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address
 *
 * This function locates the VMAs for the buffer's userspace address and checks
 * that their flags match. The only flag that we need to care about at the
 * moment is VM_PFNMAP.
 *
 * The buffer vm_flags field is set to the first VMA flags.
 *
 * Return -EFAULT if no VMA can be found for part of the buffer, or if the VMAs
 * have incompatible flags.
 */
static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
{
	struct vm_area_struct *vma;
	pgprot_t uninitialized_var(vm_page_prot);
	unsigned long start;
	unsigned long end;
	int ret = -EFAULT;

	start = buf->vbuf.m.userptr;
	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;

	down_read(&current->mm->mmap_sem);

	do {
		vma = find_vma(current->mm, start);
		if (vma == NULL)
			goto done;

		if (start == buf->vbuf.m.userptr) {
			buf->vm_flags = vma->vm_flags;
			vm_page_prot = vma->vm_page_prot;
		}

		if ((buf->vm_flags ^ vma->vm_flags) & VM_PFNMAP)
			goto done;

		if (vm_page_prot != vma->vm_page_prot)
			goto done;

		start = vma->vm_end + 1;
	} while (vma->vm_end < end);

	/* Skip cache management to enhance performance for non-cached or
	 * write-combining buffers.
	 */
	if (vm_page_prot == pgprot_noncached(vm_page_prot) ||
	    vm_page_prot == pgprot_writecombine(vm_page_prot))
		buf->skip_cache = true;

	ret = 0;

done:
	up_read(&current->mm->mmap_sem);
	return ret;
}

/*
 * isp_video_buffer_prepare - Make a buffer ready for operation
 *
 * Preparing a buffer involves:
 *
 * - validating VMAs (userspace buffers only)
 * - locking pages and VMAs into memory (userspace buffers only)
 * - building page and scatter-gather lists
 * - mapping buffers for DMA operation
 * - performing driver-specific preparation
 *
 * The function must be called in userspace context with a valid mm context
 * (this excludes cleanup paths such as sys_close when the userspace process
 * segfaults).
 */
static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
{
	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
	struct isp_video *video = vfh->video;
	enum dma_data_direction direction;
	unsigned long addr;
	int ret;

	switch (buf->vbuf.memory) {
	case V4L2_MEMORY_MMAP:
		ret = isp_video_buffer_prepare_kernel(buf);
		break;

	case V4L2_MEMORY_USERPTR:
		ret = isp_video_buffer_prepare_vm_flags(buf);
		if (ret < 0)
			return ret;

		if (buf->vm_flags & VM_PFNMAP)
			ret = isp_video_buffer_prepare_pfnmap(buf);
		else
			ret = isp_video_buffer_prepare_user(buf);
		break;

	default:
		return -EINVAL;
	}

	if (ret < 0)
		goto done;

	if (!(buf->vm_flags & VM_PFNMAP)) {
		direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			  ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
		ret = dma_map_sg(buf->queue->dev, buf->sgt.sgl,
				 buf->sgt.orig_nents, direction);
		if (ret != buf->sgt.orig_nents) {
			ret = -EFAULT;
			goto done;
		}
	}

	addr = ispmmu_vmap(video->isp, &buf->sgt);
	if (IS_ERR_VALUE(addr)) {
		ret = -EIO;
		goto done;
	}

	buf->dma = addr;

	if (!IS_ALIGNED(addr, 32)) {
		dev_dbg(video->isp->dev,
			"Buffer address must be aligned to a 32-byte boundary.\n");
		ret = -EINVAL;
		goto done;
	}

	if (buf->queue->ops->buffer_prepare)
		ret = buf->queue->ops->buffer_prepare(buf);

done:
	if (ret < 0) {
		isp_video_buffer_cleanup(buf);
		return ret;
	}

	return ret;
}

/*
 * isp_video_buffer_query - Query the status of a given buffer
 *
 * Locking: must be called with the queue lock held.
 */
static void isp_video_buffer_query(struct isp_video_buffer *buf,
				   struct v4l2_buffer *vbuf)
{
	memcpy(vbuf, &buf->vbuf, sizeof(*vbuf));

	if (buf->vma_use_count)
		vbuf->flags |= V4L2_BUF_FLAG_MAPPED;

	switch (buf->state) {
	case ISP_BUF_STATE_ERROR:
		vbuf->flags |= V4L2_BUF_FLAG_ERROR;
		/* Fallthrough */
	case ISP_BUF_STATE_DONE:
		vbuf->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case ISP_BUF_STATE_QUEUED:
	case ISP_BUF_STATE_ACTIVE:
		vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case ISP_BUF_STATE_IDLE:
	default:
		break;
	}
}

/*
 * isp_video_buffer_wait - Wait for a buffer to be ready
 *
 * In non-blocking mode, return immediately with 0 if the buffer is ready or
 * -EAGAIN if the buffer is in the QUEUED or ACTIVE state.
 *
 * In blocking mode, wait (interruptibly but with no timeout) on the buffer
 * wait queue using the same condition.
 */
static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
{
	if (nonblocking) {
		return (buf->state != ISP_BUF_STATE_QUEUED &&
			buf->state != ISP_BUF_STATE_ACTIVE)
			? 0 : -EAGAIN;
	}

	return wait_event_interruptible(buf->wait,
		buf->state != ISP_BUF_STATE_QUEUED &&
		buf->state != ISP_BUF_STATE_ACTIVE);
}

/* -----------------------------------------------------------------------------
 * Queue management
 */

/*
 * isp_video_queue_free - Free video buffers memory
 *
 * Buffers can only be freed if the queue isn't streaming and if no buffer is
 * mapped to userspace. Return -EBUSY if those conditions aren't satisfied.
 *
 * This function must be called with the queue lock held.
 */
static int isp_video_queue_free(struct isp_video_queue *queue)
{
	unsigned int i;

	if (queue->streaming)
		return -EBUSY;

	for (i = 0; i < queue->count; ++i) {
		if (queue->buffers[i]->vma_use_count != 0)
			return -EBUSY;
	}

	for (i = 0; i < queue->count; ++i) {
		struct isp_video_buffer *buf = queue->buffers[i];

		isp_video_buffer_cleanup(buf);

		vfree(buf->vaddr);
		buf->vaddr = NULL;

		kfree(buf);
		queue->buffers[i] = NULL;
	}

	INIT_LIST_HEAD(&queue->queue);
	queue->count = 0;
	return 0;
}

/*
 * isp_video_queue_alloc - Allocate video buffers memory
 *
 * This function must be called with the queue lock held.
 */
static int isp_video_queue_alloc(struct isp_video_queue *queue,
				 unsigned int nbuffers,
				 unsigned int size, enum v4l2_memory memory)
{
	struct isp_video_buffer *buf;
	unsigned int i;
	void *mem;
	int ret;

	/* Start by freeing the buffers. */
	ret = isp_video_queue_free(queue);
	if (ret < 0)
		return ret;

	/* Bail out if no buffers should be allocated. */
	if (nbuffers == 0)
		return 0;

	/* Initialize the allocated buffers. */
	for (i = 0; i < nbuffers; ++i) {
		buf = kzalloc(queue->bufsize, GFP_KERNEL);
		if (buf == NULL)
			break;

		if (memory == V4L2_MEMORY_MMAP) {
			/* Allocate video buffers memory for mmap mode. Align
			 * the size to the page size.
			 */
			mem = vmalloc_32_user(PAGE_ALIGN(size));
			if (mem == NULL) {
				kfree(buf);
				break;
			}

			buf->vbuf.m.offset = i * PAGE_ALIGN(size);
			buf->vaddr = mem;
		}

		buf->vbuf.index = i;
		buf->vbuf.length = size;
		buf->vbuf.type = queue->type;
		buf->vbuf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
		buf->vbuf.field = V4L2_FIELD_NONE;
		buf->vbuf.memory = memory;

		buf->queue = queue;
		init_waitqueue_head(&buf->wait);

		queue->buffers[i] = buf;
	}

	if (i == 0)
		return -ENOMEM;

	queue->count = i;
	return nbuffers;
}

/**
 * omap3isp_video_queue_cleanup - Clean up the video buffers queue
 * @queue: Video buffers queue
 *
 * Free all allocated resources and clean up the video buffers queue. The queue
 * must not be busy (no ongoing video stream) and buffers must have been
 * unmapped.
 *
 * Return 0 on success or -EBUSY if the queue is busy or buffers haven't been
 * unmapped.
 */
int omap3isp_video_queue_cleanup(struct isp_video_queue *queue)
{
	return isp_video_queue_free(queue);
}

/**
 * omap3isp_video_queue_init - Initialize the video buffers queue
 * @queue: Video buffers queue
 * @type: V4L2 buffer type (capture or output)
 * @ops: Driver-specific queue operations
 * @dev: Device used for DMA operations
 * @bufsize: Size of the driver-specific buffer structure
 *
 * Initialize the video buffers queue with the supplied parameters.
 *
 * The queue type must be one of V4L2_BUF_TYPE_VIDEO_CAPTURE or
 * V4L2_BUF_TYPE_VIDEO_OUTPUT. Other buffer types are not supported yet.
 *
 * Buffer objects will be allocated using the given buffer size to allow room
 * for driver-specific fields. Driver-specific buffer structures must start
 * with a struct isp_video_buffer field. Drivers with no driver-specific buffer
 * structure must pass the size of the isp_video_buffer structure in the
 * bufsize parameter.
 *
 * Return 0 on success.
 */
int omap3isp_video_queue_init(struct isp_video_queue *queue,
			      enum v4l2_buf_type type,
			      const struct isp_video_queue_operations *ops,
			      struct device *dev, unsigned int bufsize)
{
	INIT_LIST_HEAD(&queue->queue);
	mutex_init(&queue->lock);
	spin_lock_init(&queue->irqlock);

	queue->type = type;
	queue->ops = ops;
	queue->dev = dev;
	queue->bufsize = bufsize;

	return 0;
}
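
/*
 * Example (illustrative sketch only, not code used by this driver): a driver
 * that needs per-buffer private data embeds struct isp_video_buffer as the
 * first field of its own buffer structure and passes the size of that
 * structure as the bufsize parameter. The names my_buffer, my_queue_ops and
 * my_init below are hypothetical.
 *
 *	struct my_buffer {
 *		struct isp_video_buffer buffer;		// must be first
 *		u32 sequence;				// driver-private data
 *	};
 *
 *	static int my_init(struct isp_video_queue *queue, struct device *dev)
 *	{
 *		return omap3isp_video_queue_init(queue,
 *						 V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *						 &my_queue_ops, dev,
 *						 sizeof(struct my_buffer));
 *	}
 *
 * isp_video_queue_alloc() then allocates my_buffer objects, and the
 * buffer_prepare() and buffer_queue() operations can convert the
 * isp_video_buffer pointer they receive back to struct my_buffer with
 * container_of().
 */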
760
/* -----------------------------------------------------------------------------
 * V4L2 operations
 */

/**
 * omap3isp_video_queue_reqbufs - Allocate video buffers memory
 *
 * This function is intended to be used as a VIDIOC_REQBUFS ioctl handler. It
 * allocates video buffer objects and, for MMAP buffers, buffer memory.
 *
 * If the number of buffers is 0, all buffers are freed and the function
 * returns without performing any allocation.
 *
 * If the number of buffers is not 0, currently allocated buffers (if any) are
 * freed and the requested number of buffers are allocated. Depending on
 * driver-specific requirements and on memory availability, a number of buffers
 * smaller or bigger than requested can be allocated. This isn't considered as
 * an error.
 *
 * Return 0 on success or one of the following error codes:
 *
 * -EINVAL if the buffer type or index are invalid
 * -EBUSY if the queue is busy (streaming or buffers mapped)
 * -ENOMEM if the buffers can't be allocated due to an out-of-memory condition
 */
int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
				 struct v4l2_requestbuffers *rb)
{
	unsigned int nbuffers = rb->count;
	unsigned int size;
	int ret;

	if (rb->type != queue->type)
		return -EINVAL;

	queue->ops->queue_prepare(queue, &nbuffers, &size);
	if (size == 0)
		return -EINVAL;

	nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS);

	mutex_lock(&queue->lock);

	ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory);
	if (ret < 0)
		goto done;

	rb->count = ret;
	ret = 0;

done:
	mutex_unlock(&queue->lock);
	return ret;
}

/**
 * omap3isp_video_queue_querybuf - Query the status of a buffer in a queue
 *
 * This function is intended to be used as a VIDIOC_QUERYBUF ioctl handler. It
 * returns the status of a given video buffer.
 *
 * Return 0 on success or -EINVAL if the buffer type or index are invalid.
 */
int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
				  struct v4l2_buffer *vbuf)
{
	struct isp_video_buffer *buf;
	int ret = 0;

	if (vbuf->type != queue->type)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (vbuf->index >= queue->count) {
		ret = -EINVAL;
		goto done;
	}

	buf = queue->buffers[vbuf->index];
	isp_video_buffer_query(buf, vbuf);

done:
	mutex_unlock(&queue->lock);
	return ret;
}

/**
 * omap3isp_video_queue_qbuf - Queue a buffer
 *
 * This function is intended to be used as a VIDIOC_QBUF ioctl handler.
 *
 * The v4l2_buffer structure passed from userspace is first sanity tested. If
 * sane, the buffer is then processed and added to the main queue and, if the
 * queue is streaming, to the IRQ queue.
 *
 * Before being enqueued, USERPTR buffers are checked for address changes. If
 * the buffer has a different userspace address, the old memory area is
 * unlocked and the new memory area is locked.
 */
int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
			      struct v4l2_buffer *vbuf)
{
	struct isp_video_buffer *buf;
	unsigned long flags;
	int ret = -EINVAL;

	if (vbuf->type != queue->type)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (vbuf->index >= queue->count)
		goto done;

	buf = queue->buffers[vbuf->index];

	if (vbuf->memory != buf->vbuf.memory)
		goto done;

	if (buf->state != ISP_BUF_STATE_IDLE)
		goto done;

	if (vbuf->memory == V4L2_MEMORY_USERPTR &&
	    vbuf->length < buf->vbuf.length)
		goto done;

	if (vbuf->memory == V4L2_MEMORY_USERPTR &&
	    vbuf->m.userptr != buf->vbuf.m.userptr) {
		isp_video_buffer_cleanup(buf);
		buf->vbuf.m.userptr = vbuf->m.userptr;
		buf->prepared = 0;
	}

	if (!buf->prepared) {
		ret = isp_video_buffer_prepare(buf);
		if (ret < 0)
			goto done;
		buf->prepared = 1;
	}

	isp_video_buffer_cache_sync(buf);

	buf->state = ISP_BUF_STATE_QUEUED;
	list_add_tail(&buf->stream, &queue->queue);

	if (queue->streaming) {
		spin_lock_irqsave(&queue->irqlock, flags);
		queue->ops->buffer_queue(buf);
		spin_unlock_irqrestore(&queue->irqlock, flags);
	}

	ret = 0;

done:
	mutex_unlock(&queue->lock);
	return ret;
}

/**
 * omap3isp_video_queue_dqbuf - Dequeue a buffer
 *
 * This function is intended to be used as a VIDIOC_DQBUF ioctl handler.
 *
 * Wait until a buffer is ready to be dequeued, remove it from the queue and
 * copy its information to the v4l2_buffer structure.
 *
 * If the nonblocking argument is not zero and no buffer is ready, return
 * -EAGAIN immediately instead of waiting.
 *
 * If no buffer has been enqueued, or if the requested buffer type doesn't
 * match the queue type, return -EINVAL.
 */
int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
			       struct v4l2_buffer *vbuf, int nonblocking)
{
	struct isp_video_buffer *buf;
	int ret;

	if (vbuf->type != queue->type)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (list_empty(&queue->queue)) {
		ret = -EINVAL;
		goto done;
	}

	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
	ret = isp_video_buffer_wait(buf, nonblocking);
	if (ret < 0)
		goto done;

	list_del(&buf->stream);

	isp_video_buffer_query(buf, vbuf);
	buf->state = ISP_BUF_STATE_IDLE;
	vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED;

done:
	mutex_unlock(&queue->lock);
	return ret;
}
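
/*
 * The reqbufs/querybuf/qbuf/dqbuf helpers above are designed to be called
 * from a driver's V4L2 ioctl handlers (in this driver, ispvideo.c). A minimal
 * sketch of that wiring, assuming a hypothetical my_video_fh structure that
 * embeds the queue, could look like:
 *
 *	static int my_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
 *	{
 *		struct my_video_fh *vfh = fh;
 *
 *		return omap3isp_video_queue_qbuf(&vfh->queue, b);
 *	}
 *
 *	static int my_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
 *	{
 *		struct my_video_fh *vfh = fh;
 *
 *		return omap3isp_video_queue_dqbuf(&vfh->queue, b,
 *						  file->f_flags & O_NONBLOCK);
 *	}
 *
 * The wrappers are then referenced from the driver's v4l2_ioctl_ops
 * (.vidioc_qbuf, .vidioc_dqbuf, and similarly for REQBUFS and QUERYBUF). No
 * extra locking is needed in the wrappers, as the queue helpers take the
 * queue mutex internally.
 */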
965
/**
 * omap3isp_video_queue_streamon - Start streaming
 *
 * This function is intended to be used as a VIDIOC_STREAMON ioctl handler. It
 * starts streaming on the queue and calls the buffer_queue operation for all
 * queued buffers.
 *
 * Return 0 on success.
 */
int omap3isp_video_queue_streamon(struct isp_video_queue *queue)
{
	struct isp_video_buffer *buf;
	unsigned long flags;

	mutex_lock(&queue->lock);

	if (queue->streaming)
		goto done;

	queue->streaming = 1;

	spin_lock_irqsave(&queue->irqlock, flags);
	list_for_each_entry(buf, &queue->queue, stream)
		queue->ops->buffer_queue(buf);
	spin_unlock_irqrestore(&queue->irqlock, flags);

done:
	mutex_unlock(&queue->lock);
	return 0;
}

/**
 * omap3isp_video_queue_streamoff - Stop streaming
 *
 * This function is intended to be used as a VIDIOC_STREAMOFF ioctl handler. It
 * stops streaming on the queue and wakes up all the buffers.
 *
 * Drivers must stop the hardware and synchronize with interrupt handlers
 * and/or delayed works before calling this function to make sure no buffer
 * will be touched by the driver and/or hardware.
 */
void omap3isp_video_queue_streamoff(struct isp_video_queue *queue)
{
	struct isp_video_buffer *buf;
	unsigned long flags;
	unsigned int i;

	mutex_lock(&queue->lock);

	if (!queue->streaming)
		goto done;

	queue->streaming = 0;

	spin_lock_irqsave(&queue->irqlock, flags);
	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];

		if (buf->state == ISP_BUF_STATE_ACTIVE)
			wake_up(&buf->wait);

		buf->state = ISP_BUF_STATE_IDLE;
	}
	spin_unlock_irqrestore(&queue->irqlock, flags);

	INIT_LIST_HEAD(&queue->queue);

done:
	mutex_unlock(&queue->lock);
}

/**
 * omap3isp_video_queue_discard_done - Discard all buffers marked as DONE
 *
 * This function is intended to be used with suspend/resume operations. It
 * discards all 'done' buffers as they would be too old to be requested after
 * resume.
 *
 * Drivers must stop the hardware and synchronize with interrupt handlers
 * and/or delayed works before calling this function to make sure no buffer
 * will be touched by the driver and/or hardware.
 */
void omap3isp_video_queue_discard_done(struct isp_video_queue *queue)
{
	struct isp_video_buffer *buf;
	unsigned int i;

	mutex_lock(&queue->lock);

	if (!queue->streaming)
		goto done;

	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];

		if (buf->state == ISP_BUF_STATE_DONE)
			buf->state = ISP_BUF_STATE_ERROR;
	}

done:
	mutex_unlock(&queue->lock);
}

static void isp_video_queue_vm_open(struct vm_area_struct *vma)
{
	struct isp_video_buffer *buf = vma->vm_private_data;

	buf->vma_use_count++;
}

static void isp_video_queue_vm_close(struct vm_area_struct *vma)
{
	struct isp_video_buffer *buf = vma->vm_private_data;

	buf->vma_use_count--;
}

static const struct vm_operations_struct isp_video_queue_vm_ops = {
	.open = isp_video_queue_vm_open,
	.close = isp_video_queue_vm_close,
};

/**
 * omap3isp_video_queue_mmap - Map buffers to userspace
 *
 * This function is intended to be used as an mmap() file operation handler. It
 * maps a buffer to userspace based on the VMA offset.
 *
 * Only buffers of memory type MMAP are supported.
 */
int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
			      struct vm_area_struct *vma)
{
	struct isp_video_buffer *uninitialized_var(buf);
	unsigned long size;
	unsigned int i;
	int ret = 0;

	mutex_lock(&queue->lock);

	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];
		if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
			break;
	}

	if (i == queue->count) {
		ret = -EINVAL;
		goto done;
	}

	size = vma->vm_end - vma->vm_start;

	if (buf->vbuf.memory != V4L2_MEMORY_MMAP ||
	    size != PAGE_ALIGN(buf->vbuf.length)) {
		ret = -EINVAL;
		goto done;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret < 0)
		goto done;

	vma->vm_ops = &isp_video_queue_vm_ops;
	vma->vm_private_data = buf;
	isp_video_queue_vm_open(vma);

done:
	mutex_unlock(&queue->lock);
	return ret;
}

/**
 * omap3isp_video_queue_poll - Poll video queue state
 *
 * This function is intended to be used as a poll() file operation handler. It
 * polls the state of the video buffer at the front of the queue and returns an
 * events mask.
 *
 * If no buffer is present at the front of the queue, POLLERR is returned.
 */
unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
				       struct file *file, poll_table *wait)
{
	struct isp_video_buffer *buf;
	unsigned int mask = 0;

	mutex_lock(&queue->lock);
	if (list_empty(&queue->queue)) {
		mask |= POLLERR;
		goto done;
	}
	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);

	poll_wait(file, &buf->wait, wait);
	if (buf->state == ISP_BUF_STATE_DONE ||
	    buf->state == ISP_BUF_STATE_ERROR) {
		if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			mask |= POLLIN | POLLRDNORM;
		else
			mask |= POLLOUT | POLLWRNORM;
	}

done:
	mutex_unlock(&queue->lock);
	return mask;
}
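
/*
 * Like the ioctl helpers, omap3isp_video_queue_mmap() and
 * omap3isp_video_queue_poll() are meant to be wrapped by the driver's file
 * operations. A minimal sketch, assuming the same hypothetical my_video_fh
 * structure as above:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_video_fh *vfh = file_to_my_video_fh(file);
 *
 *		return omap3isp_video_queue_mmap(&vfh->queue, vma);
 *	}
 *
 *	static unsigned int my_poll(struct file *file, poll_table *wait)
 *	{
 *		struct my_video_fh *vfh = file_to_my_video_fh(file);
 *
 *		return omap3isp_video_queue_poll(&vfh->queue, file, wait);
 *	}
 *
 * file_to_my_video_fh() is a hypothetical accessor; a real driver would
 * retrieve its file handle structure from file->private_data. Both helpers
 * take the queue mutex internally, so the wrappers need no locking of their
 * own.
 */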