blob: 0c29a019bc89b25f4f645d59cde70fc3f60703ee [file] [log] [blame]
Magnus Damm2cc45cf2008-07-16 21:33:39 -03001/*
2 * helper functions for physically contiguous capture buffers
3 *
4 * The functions support hardware lacking scatter gather support
5 * (i.e. the buffers must be linear in physical memory)
6 *
7 * Copyright (c) 2008 Magnus Damm
8 *
9 * Based on videobuf-vmalloc.c,
10 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2
15 */
16
17#include <linux/init.h>
18#include <linux/module.h>
Hans Verkuilf19ad392008-07-18 02:02:50 -030019#include <linux/mm.h>
Magnus Damm720b17e2009-06-16 15:32:36 -070020#include <linux/pagemap.h>
Magnus Damm2cc45cf2008-07-16 21:33:39 -030021#include <linux/dma-mapping.h>
22#include <media/videobuf-dma-contig.h>
23
/*
 * Per-buffer private state for the dma-contig backend.  Stored in
 * videobuf_buffer->priv, allocated directly behind the videobuf_buffer
 * itself (see __videobuf_alloc()).
 */
struct videobuf_dma_contig_memory {
	u32 magic;		/* MAGIC_DC_MEM, validated by MAGIC_CHECK() */
	void *vaddr;		/* kernel virtual address of the coherent
				 * allocation; NULL when not allocated or
				 * when backed by a user-space pointer */
	dma_addr_t dma_handle;	/* bus/DMA address of the buffer */
	unsigned long size;	/* buffer size in bytes (page aligned) */
	int is_userptr;		/* non-zero if backed by user memory
				 * (V4L2_MEMORY_USERPTR) */
};
31
#define MAGIC_DC_MEM 0x0733ac61

/*
 * Validate the per-buffer magic value; BUG() on mismatch.
 * Wrapped in do { } while (0) so it expands to a single statement and
 * cannot mis-bind under an unbraced if/else at the call site.
 */
#define MAGIC_CHECK(is, should)						    \
	do {								    \
		if (unlikely((is) != (should))) {			    \
			pr_err("magic mismatch: %x expected %x\n",	    \
			       (is), (should));				    \
			BUG();						    \
		}							    \
	} while (0)
38
39static void
40videobuf_vm_open(struct vm_area_struct *vma)
41{
42 struct videobuf_mapping *map = vma->vm_private_data;
43
44 dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
45 map, map->count, vma->vm_start, vma->vm_end);
46
47 map->count++;
48}
49
/*
 * videobuf_vm_close() - VMA close hook
 *
 * Drops one reference on the mapping.  When the last reference goes
 * away, cancels any active stream and releases the coherent DMA memory
 * of every buffer that belongs to this mapping, all under q->vb_lock.
 */
static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;

	dev_dbg(map->q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_dma_contig_memory *mem;

		dev_dbg(map->q->dev, "munmap %p q=%p\n", map, q);
		mutex_lock(&q->vb_lock);

		/* We need first to cancel streams, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);

		/* free every buffer that was mapped through this VMA */
		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;

			if (q->bufs[i]->map != map)
				continue;

			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback is called only if kernel has
				   allocated memory and this memory is mmapped.
				   In this case, memory should be freed,
				   in order to do memory unmap.
				 */

				MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

				/* vfree is not atomic - can't be
				   called with IRQ's disabled
				 */
				dev_dbg(map->q->dev, "buf[%d] freeing %p\n",
					i, mem->vaddr);

				dma_free_coherent(q->dev, mem->size,
						  mem->vaddr, mem->dma_handle);
				mem->vaddr = NULL;
			}

			/* detach the buffer from the dead mapping */
			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
		}

		kfree(map);

		mutex_unlock(&q->vb_lock);
	}
}
107
/* VMA callbacks: refcount the mapping across mmap()/fork()/munmap() */
static struct vm_operations_struct videobuf_vm_ops = {
	.open = videobuf_vm_open,
	.close = videobuf_vm_close,
};
112
Magnus Damm720b17e2009-06-16 15:32:36 -0700113/**
114 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
115 * @mem: per-buffer private videobuf-dma-contig data
116 *
117 * This function resets the user space pointer
118 */
119static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
120{
121 mem->is_userptr = 0;
122 mem->dma_handle = 0;
123 mem->size = 0;
124}
125
126/**
127 * videobuf_dma_contig_user_get() - setup user space memory pointer
128 * @mem: per-buffer private videobuf-dma-contig data
129 * @vb: video buffer to map
130 *
131 * This function validates and sets up a pointer to user space memory.
132 * Only physically contiguous pfn-mapped memory is accepted.
133 *
134 * Returns 0 if successful.
135 */
136static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
137 struct videobuf_buffer *vb)
138{
139 struct mm_struct *mm = current->mm;
140 struct vm_area_struct *vma;
141 unsigned long prev_pfn, this_pfn;
142 unsigned long pages_done, user_address;
143 int ret;
144
145 mem->size = PAGE_ALIGN(vb->size);
146 mem->is_userptr = 0;
147 ret = -EINVAL;
148
149 down_read(&mm->mmap_sem);
150
151 vma = find_vma(mm, vb->baddr);
152 if (!vma)
153 goto out_up;
154
155 if ((vb->baddr + mem->size) > vma->vm_end)
156 goto out_up;
157
158 pages_done = 0;
159 prev_pfn = 0; /* kill warning */
160 user_address = vb->baddr;
161
162 while (pages_done < (mem->size >> PAGE_SHIFT)) {
163 ret = follow_pfn(vma, user_address, &this_pfn);
164 if (ret)
165 break;
166
167 if (pages_done == 0)
168 mem->dma_handle = this_pfn << PAGE_SHIFT;
169 else if (this_pfn != (prev_pfn + 1))
170 ret = -EFAULT;
171
172 if (ret)
173 break;
174
175 prev_pfn = this_pfn;
176 user_address += PAGE_SIZE;
177 pages_done++;
178 }
179
180 if (!ret)
181 mem->is_userptr = 1;
182
183 out_up:
184 up_read(&current->mm->mmap_sem);
185
186 return ret;
187}
188
Magnus Damm2cc45cf2008-07-16 21:33:39 -0300189static void *__videobuf_alloc(size_t size)
190{
191 struct videobuf_dma_contig_memory *mem;
192 struct videobuf_buffer *vb;
193
194 vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
195 if (vb) {
196 mem = vb->priv = ((char *)vb) + size;
197 mem->magic = MAGIC_DC_MEM;
198 }
199
200 return vb;
201}
202
203static void *__videobuf_to_vmalloc(struct videobuf_buffer *buf)
204{
205 struct videobuf_dma_contig_memory *mem = buf->priv;
206
207 BUG_ON(!mem);
208 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
209
210 return mem->vaddr;
211}
212
/*
 * __videobuf_iolock() - prepare a buffer's backing memory for I/O
 * @q:    owning videobuf queue
 * @vb:   buffer to lock
 * @fbuf: framebuffer info (unused here; OVERLAY is rejected)
 *
 * MMAP buffers must already have been allocated by
 * __videobuf_mmap_mapper(); USERPTR buffers either map the user
 * pointer (vb->baddr set) or, for the read() method, get a fresh
 * coherent allocation.  Returns 0 on success or a negative errno.
 */
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_dma_contig_memory *mem = vb->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dev_dbg(q->dev, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			dev_err(q->dev, "memory is not alloced/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);

		/* handle pointer from user space */
		if (vb->baddr)
			return videobuf_dma_contig_user_get(mem, vb);

		/* allocate memory for the read() method */
		mem->size = PAGE_ALIGN(vb->size);
		mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
						&mem->dma_handle, GFP_KERNEL);
		if (!mem->vaddr) {
			dev_err(q->dev, "dma_alloc_coherent %ld failed\n",
				mem->size);
			return -ENOMEM;
		}

		dev_dbg(q->dev, "dma_alloc_coherent data is at %p (%ld)\n",
			mem->vaddr, mem->size);
		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}
261
262static int __videobuf_sync(struct videobuf_queue *q,
263 struct videobuf_buffer *buf)
264{
265 struct videobuf_dma_contig_memory *mem = buf->priv;
266
267 BUG_ON(!mem);
268 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
269
270 dma_sync_single_for_cpu(q->dev, mem->dma_handle, mem->size,
271 DMA_FROM_DEVICE);
272 return 0;
273}
274
275static int __videobuf_mmap_free(struct videobuf_queue *q)
276{
277 unsigned int i;
278
279 dev_dbg(q->dev, "%s\n", __func__);
280 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
281 if (q->bufs[i] && q->bufs[i]->map)
282 return -EBUSY;
283 }
284
285 return 0;
286}
287
288static int __videobuf_mmap_mapper(struct videobuf_queue *q,
289 struct vm_area_struct *vma)
290{
291 struct videobuf_dma_contig_memory *mem;
292 struct videobuf_mapping *map;
293 unsigned int first;
294 int retval;
295 unsigned long size, offset = vma->vm_pgoff << PAGE_SHIFT;
296
297 dev_dbg(q->dev, "%s\n", __func__);
298 if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
299 return -EINVAL;
300
301 /* look for first buffer to map */
302 for (first = 0; first < VIDEO_MAX_FRAME; first++) {
303 if (!q->bufs[first])
304 continue;
305
306 if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
307 continue;
308 if (q->bufs[first]->boff == offset)
309 break;
310 }
311 if (VIDEO_MAX_FRAME == first) {
312 dev_dbg(q->dev, "invalid user space offset [offset=0x%lx]\n",
313 offset);
314 return -EINVAL;
315 }
316
317 /* create mapping + update buffer list */
318 map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
319 if (!map)
320 return -ENOMEM;
321
322 q->bufs[first]->map = map;
323 map->start = vma->vm_start;
324 map->end = vma->vm_end;
325 map->q = q;
326
327 q->bufs[first]->baddr = vma->vm_start;
328
329 mem = q->bufs[first]->priv;
330 BUG_ON(!mem);
331 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
332
333 mem->size = PAGE_ALIGN(q->bufs[first]->bsize);
334 mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
335 &mem->dma_handle, GFP_KERNEL);
336 if (!mem->vaddr) {
337 dev_err(q->dev, "dma_alloc_coherent size %ld failed\n",
338 mem->size);
339 goto error;
340 }
341 dev_dbg(q->dev, "dma_alloc_coherent data is at addr %p (size %ld)\n",
342 mem->vaddr, mem->size);
343
344 /* Try to remap memory */
345
346 size = vma->vm_end - vma->vm_start;
347 size = (size < mem->size) ? size : mem->size;
348
349 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
350 retval = remap_pfn_range(vma, vma->vm_start,
351 mem->dma_handle >> PAGE_SHIFT,
352 size, vma->vm_page_prot);
353 if (retval) {
354 dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
355 dma_free_coherent(q->dev, mem->size,
356 mem->vaddr, mem->dma_handle);
357 goto error;
358 }
359
360 vma->vm_ops = &videobuf_vm_ops;
361 vma->vm_flags |= VM_DONTEXPAND;
362 vma->vm_private_data = map;
363
364 dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
365 map, q, vma->vm_start, vma->vm_end,
366 (long int) q->bufs[first]->bsize,
367 vma->vm_pgoff, first);
368
369 videobuf_vm_open(vma);
370
371 return 0;
372
373error:
374 kfree(map);
375 return -ENOMEM;
376}
377
378static int __videobuf_copy_to_user(struct videobuf_queue *q,
379 char __user *data, size_t count,
380 int nonblocking)
381{
382 struct videobuf_dma_contig_memory *mem = q->read_buf->priv;
383 void *vaddr;
384
385 BUG_ON(!mem);
386 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
387 BUG_ON(!mem->vaddr);
388
389 /* copy to userspace */
390 if (count > q->read_buf->size - q->read_off)
391 count = q->read_buf->size - q->read_off;
392
393 vaddr = mem->vaddr;
394
395 if (copy_to_user(data, vaddr + q->read_off, count))
396 return -EFAULT;
397
398 return count;
399}
400
/*
 * __videobuf_copy_stream() - copy stream data to user space
 * @q:           owning videobuf queue
 * @data:        destination in user space
 * @count:       bytes requested
 * @pos:         bytes already delivered in this read() call
 * @vbihack:     if set, patch the frame counter into the buffer tail
 * @nonblocking: forwarded to __videobuf_copy_to_user() (unused there)
 *
 * Returns bytes copied, or -EFAULT only when nothing was delivered yet
 * (pos == 0); a fault after partial delivery reports the short count.
 */
static int __videobuf_copy_stream(struct videobuf_queue *q,
				  char __user *data, size_t count, size_t pos,
				  int vbihack, int nonblocking)
{
	unsigned int *fc;
	struct videobuf_dma_contig_memory *mem = q->read_buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	if (vbihack) {
		/* dirty, undocumented hack -- pass the frame counter
		 * within the last four bytes of each vbi data block.
		 * We need that one to maintain backward compatibility
		 * to all vbi decoding software out there ... */
		fc = (unsigned int *)mem->vaddr;
		fc += (q->read_buf->size >> 2) - 1;
		*fc = q->read_buf->field_count >> 1;
		dev_dbg(q->dev, "vbihack: %d\n", *fc);
	}

	/* copy stuff using the common method */
	count = __videobuf_copy_to_user(q, data, count, nonblocking);

	if ((count == -EFAULT) && (pos == 0))
		return -EFAULT;

	return count;
}
430
/* Backend operation table plugged into the videobuf core */
static struct videobuf_qtype_ops qops = {
	.magic = MAGIC_QTYPE_OPS,

	.alloc = __videobuf_alloc,
	.iolock = __videobuf_iolock,
	.sync = __videobuf_sync,
	.mmap_free = __videobuf_mmap_free,
	.mmap_mapper = __videobuf_mmap_mapper,
	.video_copy_to_user = __videobuf_copy_to_user,
	.copy_stream = __videobuf_copy_stream,
	.vmalloc = __videobuf_to_vmalloc,
};
443
/**
 * videobuf_queue_dma_contig_init() - initialize a queue for dma-contig buffers
 * @q:       queue to initialize
 * @ops:     driver-supplied queue operations
 * @dev:     device used for coherent DMA allocations
 * @irqlock: driver spinlock protecting the queue's buffer lists
 * @type:    V4L2 buffer type served by this queue
 * @field:   initial field setting
 * @msize:   size of the driver's buffer structure
 * @priv:    driver private data attached to the queue
 *
 * Thin wrapper that hands the queue to the videobuf core together with
 * this backend's operation table (qops).
 */
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
				    struct videobuf_queue_ops *ops,
				    struct device *dev,
				    spinlock_t *irqlock,
				    enum v4l2_buf_type type,
				    enum v4l2_field field,
				    unsigned int msize,
				    void *priv)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
457
458dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
459{
460 struct videobuf_dma_contig_memory *mem = buf->priv;
461
462 BUG_ON(!mem);
463 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
464
465 return mem->dma_handle;
466}
467EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
468
/*
 * videobuf_dma_contig_free() - release a USERPTR buffer's resources
 * @q:   owning videobuf queue
 * @buf: buffer to release
 *
 * Only V4L2_MEMORY_USERPTR buffers are handled here: either the user
 * pointer is dropped, or the coherent memory allocated for the read()
 * method is freed.  MMAP memory is freed in videobuf_vm_close().
 */
void videobuf_dma_contig_free(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	/* mmapped memory can't be freed here, otherwise mmapped region
	   would be released, while still needed. In this case, the memory
	   release should happen inside videobuf_vm_close().
	   So, it should free memory only if the memory were allocated for
	   read() operation.
	 */
	if (buf->memory != V4L2_MEMORY_USERPTR)
		return;

	if (!mem)
		return;

	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	/* handle user space pointer case */
	if (buf->baddr) {
		videobuf_dma_contig_user_put(mem);
		return;
	}

	/* read() method */
	dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
	mem->vaddr = NULL;
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
499
/* Module metadata */
MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");