/*
 * helper functions for physically contiguous capture buffers
 *
 * The functions support hardware lacking scatter gather support
 * (i.e. the buffers must be linear in physical memory)
 *
 * Copyright (c) 2008 Magnus Damm
 *
 * Based on videobuf-vmalloc.c,
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/videobuf-dma-contig.h>

struct videobuf_dma_contig_memory {
	u32 magic;
	void *vaddr;
	dma_addr_t dma_handle;
	bool cached;
	unsigned long size;
};

#define MAGIC_DC_MEM 0x0733ac61
#define MAGIC_CHECK(is, should)						    \
	if (unlikely((is) != (should))) {				    \
		pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
		BUG();							    \
	}

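/*
 * __videobuf_dc_alloc - allocate and map a physically contiguous buffer.
 * For cached queues the pages come from alloc_pages_exact() and are mapped
 * for streaming DMA with dma_map_single(); otherwise dma_alloc_coherent()
 * provides an uncached coherent allocation.
 */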
static int __videobuf_dc_alloc(struct device *dev,
			       struct videobuf_dma_contig_memory *mem,
			       unsigned long size, gfp_t flags)
{
	mem->size = size;
	if (mem->cached) {
		mem->vaddr = alloc_pages_exact(mem->size, flags | GFP_DMA);
		if (mem->vaddr) {
			int err;

			mem->dma_handle = dma_map_single(dev, mem->vaddr,
							 mem->size,
							 DMA_FROM_DEVICE);
			err = dma_mapping_error(dev, mem->dma_handle);
			if (err) {
				dev_err(dev, "dma_map_single failed\n");

				free_pages_exact(mem->vaddr, mem->size);
				mem->vaddr = NULL;
				return err;
			}
		}
	} else
		mem->vaddr = dma_alloc_coherent(dev, mem->size,
						&mem->dma_handle, flags);

	if (!mem->vaddr) {
		dev_err(dev, "memory alloc size %ld failed\n", mem->size);
		return -ENOMEM;
	}

	dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size);

	return 0;
}

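/*
 * __videobuf_dc_free - undo __videobuf_dc_alloc(): unmap and release the
 * pages (cached case) or free the coherent allocation.
 */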
static void __videobuf_dc_free(struct device *dev,
			       struct videobuf_dma_contig_memory *mem)
{
	if (mem->cached) {
		if (!mem->vaddr)
			return;
		dma_unmap_single(dev, mem->dma_handle, mem->size,
				 DMA_FROM_DEVICE);
		free_pages_exact(mem->vaddr, mem->size);
	} else
		dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);

	mem->vaddr = NULL;
}

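/* Bump the mapping refcount whenever the VMA is duplicated (fork, mremap). */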
static void videobuf_vm_open(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;

	dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count++;
}

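/*
 * Drop the mapping refcount; on the last unmap cancel any active streaming
 * and free the kernel-allocated buffers that belong to this mapping.
 */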
static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;

	dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_dma_contig_memory *mem;

		dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
		videobuf_queue_lock(q);

		/* We need first to cancel streams, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);

		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;

			if (q->bufs[i]->map != map)
				continue;

			mem = q->bufs[i]->priv;
			if (mem) {
				/*
				 * This callback is called only if the kernel
				 * has allocated the memory and that memory is
				 * mmapped. In that case the memory must be
				 * freed here so the mapping can be torn down.
				 */

				MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

				/*
				 * vfree is not atomic - can't be called with
				 * IRQs disabled
				 */
				dev_dbg(q->dev, "buf[%d] freeing %p\n",
					i, mem->vaddr);

				__videobuf_dc_free(q->dev, mem);
				mem->vaddr = NULL;
			}

			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
		}

		kfree(map);

		videobuf_queue_unlock(q);
	}
}

static const struct vm_operations_struct videobuf_vm_ops = {
	.open	= videobuf_vm_open,
	.close	= videobuf_vm_close,
};

/**
 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
 * @mem: per-buffer private videobuf-dma-contig data
 *
 * This function resets the user space pointer
 */
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
	mem->dma_handle = 0;
	mem->size = 0;
}

/**
 * videobuf_dma_contig_user_get() - setup user space memory pointer
 * @mem: per-buffer private videobuf-dma-contig data
 * @vb: video buffer to map
 *
 * This function validates and sets up a pointer to user space memory.
 * Only physically contiguous pfn-mapped memory is accepted.
 *
 * Returns 0 if successful.
 */
static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
					struct videobuf_buffer *vb)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long prev_pfn, this_pfn;
	unsigned long pages_done, user_address;
	unsigned int offset;
	int ret;

	offset = vb->baddr & ~PAGE_MASK;
	mem->size = PAGE_ALIGN(vb->size + offset);
	ret = -EINVAL;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, vb->baddr);
	if (!vma)
		goto out_up;

	if ((vb->baddr + mem->size) > vma->vm_end)
		goto out_up;

	pages_done = 0;
	prev_pfn = 0; /* kill warning */
	user_address = vb->baddr;

	while (pages_done < (mem->size >> PAGE_SHIFT)) {
		ret = follow_pfn(vma, user_address, &this_pfn);
		if (ret)
			break;

		if (pages_done == 0)
			mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
		else if (this_pfn != (prev_pfn + 1))
			ret = -EFAULT;

		if (ret)
			break;

		prev_pfn = this_pfn;
		user_address += PAGE_SIZE;
		pages_done++;
	}

out_up:
	up_read(&current->mm->mmap_sem);

	return ret;
}

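/*
 * Allocate a videobuf_buffer together with its private
 * videobuf_dma_contig_memory area in a single kzalloc()ed block; the two
 * wrappers below select cached or uncached behaviour.
 */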
static struct videobuf_buffer *__videobuf_alloc_vb(size_t size, bool cached)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_buffer *vb;

	vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
	if (vb) {
		vb->priv = ((char *)vb) + size;
		mem = vb->priv;
		mem->magic = MAGIC_DC_MEM;
		mem->cached = cached;
	}

	return vb;
}

static struct videobuf_buffer *__videobuf_alloc_uncached(size_t size)
{
	return __videobuf_alloc_vb(size, false);
}

static struct videobuf_buffer *__videobuf_alloc_cached(size_t size)
{
	return __videobuf_alloc_vb(size, true);
}

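/* Return the kernel virtual address of the buffer, if any. */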
static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->vaddr;
}

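/*
 * Prepare a buffer for I/O depending on its memory type: MMAP buffers must
 * already have been allocated by __videobuf_mmap_mapper(), USERPTR buffers
 * are either resolved to a contiguous pfn range or allocated for read().
 */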
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_dma_contig_memory *mem = vb->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dev_dbg(q->dev, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			dev_err(q->dev, "memory is not allocated/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);

		/* handle pointer from user space */
		if (vb->baddr)
			return videobuf_dma_contig_user_get(mem, vb);

		/* allocate memory for the read() method */
		if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
					GFP_KERNEL))
			return -ENOMEM;
		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
		return -EINVAL;
	}

	return 0;
}

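/*
 * Sync the streaming DMA mapping for CPU access; only used by the cached
 * queue variant, where reads go through the CPU cache.
 */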
static int __videobuf_sync(struct videobuf_queue *q,
			   struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	dma_sync_single_for_cpu(q->dev, mem->dma_handle, mem->size,
				DMA_FROM_DEVICE);

	return 0;
}

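/*
 * Allocate the buffer memory and map it into the caller's address space.
 * Uncached buffers are remapped in one go with remap_pfn_range(); cached
 * buffers are inserted page by page with vm_insert_page().
 */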
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  struct vm_area_struct *vma)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_mapping *map;
	int retval;
	unsigned long size;
	unsigned long pos, start = vma->vm_start;
	struct page *page;

	dev_dbg(q->dev, "%s\n", __func__);

	/* create mapping + update buffer list */
	map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	buf->map = map;
	map->q = q;

	buf->baddr = vma->vm_start;

	mem = buf->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
				GFP_KERNEL | __GFP_COMP))
		goto error;

	/* Try to remap memory */
	size = vma->vm_end - vma->vm_start;
	size = (size < mem->size) ? size : mem->size;

	if (!mem->cached) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		retval = remap_pfn_range(vma, vma->vm_start,
					 mem->dma_handle >> PAGE_SHIFT,
					 size, vma->vm_page_prot);
		if (retval) {
			dev_err(q->dev, "mmap: remap failed with error %d. ",
				retval);
			dma_free_coherent(q->dev, mem->size,
					  mem->vaddr, mem->dma_handle);
			goto error;
		}
	} else {
		pos = (unsigned long)mem->vaddr;

		while (size > 0) {
			page = virt_to_page((void *)pos);
			if (NULL == page) {
				dev_err(q->dev, "mmap: virt_to_page failed\n");
				__videobuf_dc_free(q->dev, mem);
				goto error;
			}
			retval = vm_insert_page(vma, start, page);
			if (retval) {
				dev_err(q->dev, "mmap: insert failed with error %d\n",
					retval);
				__videobuf_dc_free(q->dev, mem);
				goto error;
			}
			start += PAGE_SIZE;
			pos += PAGE_SIZE;

			if (size > PAGE_SIZE)
				size -= PAGE_SIZE;
			else
				size = 0;
		}
	}

	vma->vm_ops = &videobuf_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_private_data = map;

	dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
		map, q, vma->vm_start, vma->vm_end,
		(long int)buf->bsize, vma->vm_pgoff, buf->i);

	videobuf_vm_open(vma);

	return 0;

error:
	kfree(map);
	return -ENOMEM;
}

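/*
 * The two operation tables differ only in the allocator wrapper and in the
 * presence of a sync op, which the cached variant needs before the CPU may
 * read DMA'd data.
 */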
static struct videobuf_qtype_ops qops = {
	.magic		= MAGIC_QTYPE_OPS,
	.alloc_vb	= __videobuf_alloc_uncached,
	.iolock		= __videobuf_iolock,
	.mmap_mapper	= __videobuf_mmap_mapper,
	.vaddr		= __videobuf_to_vaddr,
};

static struct videobuf_qtype_ops qops_cached = {
	.magic		= MAGIC_QTYPE_OPS,
	.alloc_vb	= __videobuf_alloc_cached,
	.iolock		= __videobuf_iolock,
	.sync		= __videobuf_sync,
	.mmap_mapper	= __videobuf_mmap_mapper,
	.vaddr		= __videobuf_to_vaddr,
};

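/*
 * videobuf_queue_dma_contig_init - initialize a videobuf queue that hands
 * out uncached, physically contiguous buffers.
 */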
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
				    const struct videobuf_queue_ops *ops,
				    struct device *dev,
				    spinlock_t *irqlock,
				    enum v4l2_buf_type type,
				    enum v4l2_field field,
				    unsigned int msize,
				    void *priv,
				    struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);

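/*
 * videobuf_queue_dma_contig_init_cached - same as above, but the buffers are
 * cacheable and are synced with __videobuf_sync() before CPU access.
 */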
void videobuf_queue_dma_contig_init_cached(struct videobuf_queue *q,
					   const struct videobuf_queue_ops *ops,
					   struct device *dev,
					   spinlock_t *irqlock,
					   enum v4l2_buf_type type,
					   enum v4l2_field field,
					   unsigned int msize,
					   void *priv, struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops_cached, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init_cached);

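/*
 * A minimal usage sketch (not part of this file's API): a capture driver
 * typically initializes the queue and then uses the DMA address of each
 * buffer when programming its hardware. The names mydev, mydev_qops,
 * mydev->irqlock and mydev->lock below are hypothetical.
 *
 *	videobuf_queue_dma_contig_init(&mydev->vb_q, &mydev_qops,
 *				       &pdev->dev, &mydev->irqlock,
 *				       V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *				       V4L2_FIELD_NONE,
 *				       sizeof(struct videobuf_buffer),
 *				       mydev, &mydev->lock);
 *
 *	// in the driver's buf_queue() callback:
 *	dma_addr_t addr = videobuf_to_dma_contig(vb);
 *	// ...program 'addr' into the capture DMA engine...
 */

/* videobuf_to_dma_contig - return the DMA (bus) address of a buffer. */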
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);

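/*
 * videobuf_dma_contig_free - release the memory attached to a USERPTR buffer:
 * drop the user-space reference, or free the kernel allocation made for the
 * read() method.
 */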
void videobuf_dma_contig_free(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	/*
	 * mmapped memory can't be freed here, otherwise the mmapped region
	 * would be released while still needed. In that case the memory
	 * release happens inside videobuf_vm_close().
	 * So memory should only be freed here if it was allocated for the
	 * read() operation.
	 */
	if (buf->memory != V4L2_MEMORY_USERPTR)
		return;

	if (!mem)
		return;

	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	/* handle user space pointer case */
	if (buf->baddr) {
		videobuf_dma_contig_user_put(mem);
		return;
	}

	/* read() method */
	if (mem->vaddr) {
		__videobuf_dc_free(q->dev, mem);
		mem->vaddr = NULL;
	}
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);

MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");